Release v10.6.2_20241011
diff --git a/Source/CMSIS_RTOS_V2/cmsis_os.h b/Source/CMSIS_RTOS_V2/cmsis_os.h
new file mode 100644
index 0000000..aae5229
--- /dev/null
+++ b/Source/CMSIS_RTOS_V2/cmsis_os.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2013-2019 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ----------------------------------------------------------------------
+ *
+ * $Date: 10. January 2017
+ * $Revision: V2.1.0
+ *
+ * Project: CMSIS-RTOS API
+ * Title: cmsis_os.h FreeRTOS header file
+ *---------------------------------------------------------------------------*/
+
+#ifndef CMSIS_OS_H_
+#define CMSIS_OS_H_
+
+#define osCMSIS 0x20001U ///< API version (main[31:16].sub[15:0])
+
+#include "cmsis_os2.h"
+
+#endif // CMSIS_OS_H_
diff --git a/Source/CMSIS_RTOS_V2/cmsis_os2.c b/Source/CMSIS_RTOS_V2/cmsis_os2.c
index bfb3223..85d90fd 100644
--- a/Source/CMSIS_RTOS_V2/cmsis_os2.c
+++ b/Source/CMSIS_RTOS_V2/cmsis_os2.c
@@ -19,12 +19,8 @@
* Purpose: CMSIS RTOS2 wrapper for FreeRTOS
*
*---------------------------------------------------------------------------*/
-
#include <string.h>
-#include "cmsis_os2.h" // ::CMSIS:RTOS2
-#include "cmsis_compiler.h" // Compiler agnostic definitions
-
#include "FreeRTOS.h" // ARM.FreeRTOS::RTOS:Core
#include "task.h" // ARM.FreeRTOS::RTOS:Core
#include "event_groups.h" // ARM.FreeRTOS::RTOS:Event Groups
@@ -34,6 +30,9 @@
#include "freertos_mpool.h" // osMemoryPool definitions
#include "freertos_os2.h" // Configuration check and setup
+#include "cmsis_os2.h" // ::CMSIS:RTOS2
+#include "cmsis_compiler.h" // Compiler agnostic definitions
+
/*---------------------------------------------------------------------------*/
#ifndef __ARM_ARCH_6M__
#define __ARM_ARCH_6M__ 0
@@ -227,20 +226,34 @@
/* Get OS Tick count value */
static uint32_t OS_Tick_GetCount (void) {
+#if (__ARM_ARCH_7A__ == 1U)
+ return (__get_CNTFRQ() - PL1_GetCurrentValue());
+#else
uint32_t load = SysTick->LOAD;
return (load - SysTick->VAL);
+#endif /* __ARM_ARCH_7A__ */
}
#if (configUSE_TICKLESS_IDLE == 0)
/* Get OS Tick overflow status */
static uint32_t OS_Tick_GetOverflow (void) {
+#if (__ARM_ARCH_7A__ == 1U)
+ CNTP_CTL_Type cntp_ctl;
+ cntp_ctl.w = PL1_GetControl();
+ return (cntp_ctl.b.ISTATUS);
+#else
return ((SysTick->CTRL >> 16) & 1U);
+#endif /* __ARM_ARCH_7A__ */
}
#endif
/* Get OS Tick interval */
static uint32_t OS_Tick_GetInterval (void) {
+#if (__ARM_ARCH_7A__ == 1U)
+ return (__get_CNTFRQ() + 1U);
+#else
return (SysTick->LOAD + 1U);
+#endif /* __ARM_ARCH_7A__ */
}
/* ==== Kernel Management Functions ==== */
diff --git a/Source/History.txt b/Source/History.txt
index 30d2610..87e7952 100644
--- a/Source/History.txt
+++ b/Source/History.txt
@@ -1,8 +1,144 @@
Documentation and download available at https://www.FreeRTOS.org/
+Changes between FreeRTOS V10.6.1 and FreeRTOS V10.6.2 released November 29, 2023
+
+ + Add the following improvements to the new MPU wrapper (mpu_wrappers_v2.c)
+ introduced in version 10.6.0:
+ - Introduce Access Control List (ACL) feature to allow the application
+ writer to control an unprivileged task’s access to kernel objects.
+ - Update the system call entry mechanism to only require one Supervisor
+ Call (SVC) instruction.
+ - Wrap parameters for system calls with more than four parameters in a
+ struct to avoid special handling during system call entry.
+ - Fix 2 possible integer overflows.
+ - Convert some asserts to run time parameter checks.
+
+Changes between FreeRTOS V10.6.0 and FreeRTOS V10.6.1 released August 17, 2023
+
+ + Add runtime parameter checks to functions in mpu_wrappers_v2.c file.
+ The same checks are already performed in API implementations using
+ asserts.
+ We thank the following people for their inputs in these changes:
+ - Lan Luo, Zixia Liu of School of Computer Science and Technology,
+ Anhui University of Technology, China.
+ - Xinwen Fu of Department of Computer Science, University of
+ Massachusetts Lowell, USA.
+ - Xinhui Shao, Yumeng Wei, Huaiyu Yan, Zhen Ling of School of
+ Computer Science and Engineering, Southeast University, China.
+
+Changes between FreeRTOS V10.5.1 and FreeRTOS V10.6.0 released July 13, 2023
+
+ + Add a new MPU wrapper that places additional restrictions on unprivileged
+ tasks. The following is the list of changes introduced with the new MPU
+ wrapper:
+
+ 1. Opaque and indirectly verifiable integers for kernel object handles:
+ All the kernel object handles (for example, queue handles) are now
+ opaque integers. Previously object handles were raw pointers.
+ 2. Save the task context in Task Control Block (TCB): When a task is
+ swapped out by the scheduler, the task's context is now saved in its
+ TCB. Previously the task's context was saved on its stack.
+ 3. Execute system calls on a separate privileged only stack: FreeRTOS
+ system calls, which execute with elevated privilege, now use a
+ separate privileged only stack. Previously system calls used the
+ calling task's stack. The application writer can control the size of
+ the system call stack using new configSYSTEM_CALL_STACK_SIZE config
+ macro.
+ 4. Memory bounds checks: FreeRTOS system calls which accept a pointer
+ and de-reference it, now verify that the calling task has required
+ permissions to access the memory location referenced by the pointer.
+ 5. System calls restrictions: The following system calls are no longer
+ available to unprivileged tasks:
+ - vQueueDelete
+ - xQueueCreateMutex
+ - xQueueCreateMutexStatic
+ - xQueueCreateCountingSemaphore
+ - xQueueCreateCountingSemaphoreStatic
+ - xQueueGenericCreate
+ - xQueueGenericCreateStatic
+ - xQueueCreateSet
+ - xQueueRemoveFromSet
+ - xQueueGenericReset
+ - xTaskCreate
+ - xTaskCreateStatic
+ - vTaskDelete
+ - vTaskPrioritySet
+ - vTaskSuspendAll
+ - xTaskResumeAll
+ - xTaskGetHandle
+ - xTaskCallApplicationTaskHook
+ - vTaskList
+ - vTaskGetRunTimeStats
+ - xTaskCatchUpTicks
+ - xEventGroupCreate
+ - xEventGroupCreateStatic
+ - vEventGroupDelete
+ - xStreamBufferGenericCreate
+ - xStreamBufferGenericCreateStatic
+ - vStreamBufferDelete
+ - xStreamBufferReset
+ Also, an unprivileged task can no longer use vTaskSuspend to suspend
+ any task other than itself.
+
+ We thank the following people for their inputs in these enhancements:
+ - David Reiss of Meta Platforms, Inc.
+ - Lan Luo, Xinhui Shao, Yumeng Wei, Zixia Liu, Huaiyu Yan and Zhen Ling
+ of School of Computer Science and Engineering, Southeast University,
+ China.
+ - Xinwen Fu of Department of Computer Science, University of
+ Massachusetts Lowell, USA.
+ - Yuequi Chen, Zicheng Wang, Minghao Lin of University of Colorado
+ Boulder, USA.
+ + Add Cortex-M35P port. Contributed by @urutva.
+ + Add embedded extension (RV32E) support to the IAR RISC-V port.
+ + Add ulTaskGetRunTimeCounter and ulTaskGetRunTimePercent APIs. Contributed by
+ @chrisnc.
+ + Add APIs to get the application supplied buffers from statically
+ created kernel objects. The following new APIs are added:
+ - xTaskGetStaticBuffers
+ - xQueueGetStaticBuffers
+ - xQueueGenericGetStaticBuffers
+ - xSemaphoreGetStaticBuffer
+ - xEventGroupGetStaticBuffer
+ - xStreamBufferGetStaticBuffers
+ - xMessageBufferGetStaticBuffers
+ These APIs enable the application writer to obtain static buffers from
+ the kernel object and free/reuse them at the time of deletion. Earlier
+ the application writer had to maintain the association of static buffers
+ and the kernel object in the application. Contributed by @Dazza0.
+ + Add Thread Local Storage (TLS) support using picolibc function. Contributed
+ by @keith-packard.
+ + Add configTICK_TYPE_WIDTH_IN_BITS to configure TickType_t data type. As a result,
+ the number of bits in an event group also increases with big data type. Contributed
+ by @Hadatko.
+ + Update eTaskGetState and uxTaskGetSystemState to return eReady for pending ready
+ tasks. Contributed by @Dazza0.
+ + Update heap_4 and heap_5 to add padding only if the resulting block is not
+ already aligned.
+ + Fix the scheduler logic in a couple of places to not preempt a task when an
+ equal priority task becomes ready.
+ + Add macros used in FreeRTOS-Plus libraries. Contributed by @Holden.
+ + Fix clang compiler warnings. Contributed by @phelter.
+ + Add assertions to ARMv8-M ports to detect when FreeRTOS APIs are called from
+ interrupts with priority higher than the configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ Contributed by @urutva.
+ + Add xPortIsInsideInterrupt API to ARM_CM0 ports.
+ + Fix build warning in MSP430X port when large data model is used.
+ + Add the ability to use Cortex-R5 port on the parts without FPU.
+ + Fix build warning in heap implementations on PIC24/dsPIC.
+ + Update interrupt priority asserts for Cortex-M ports so that these do not fire
+ on QEMU which does not implement PRIO bits.
+ + Update ARMv7-M ports to ensure that kernel interrupts run at the lowest priority.
+ configKERNEL_INTERRUPT_PRIORITY is now obsolete for ARMv7-M ports and brings
+ these ports inline with the newer ARMv8-M ports. Contributed by @chrisnc.
+ + Fix build issue in POSIX GCC port on Windows Subsystem for Linux (WSL). Contributed
+ by @jacky309.
+ + Add portMEMORY_BARRIER to Microblaze port. Contributed by @bbain.
+ + Add portPOINTER_SIZE_TYPE definition for ATmega port. Contributed by @jputcu.
+ + Multiple improvements in the CMake support. Contributed by @phelte and @cookpate.
+
Changes between FreeRTOS V10.5.0 and FreeRTOS V10.5.1 released November 16 2022
-
- + Updating the version in the manifest.yml file to be accurate.
+ + Updated the kernel version in manifest and SBOM
Changes between FreeRTOS V10.4.6 and FreeRTOS V10.5.0 released September 16 2022
@@ -65,11 +201,11 @@
The feature can be controlled by setting the configuration option
configUSE_SB_COMPLETED_CALLBACK in FreeRTOSConfig.h. When the option is set to 1,
APIs xStreamBufferCreateWithCallback() or xStreamBufferCreateStaticWithCallback()
- (and likewise APIs for message buffer) can be used to create a stream buffer
+ (and likewise APIs for message buffer) can be used to create a stream buffer
or message buffer instance with application provided callback overrides. When
the option is set to 0, then the default callbacks as defined by
- sbSEND_COMPLETED() and sbRECEIVE_COMPLETED() macros are invoked. To maintain
- backwards compatibility, configUSE_SB_COMPLETED_CALLBACK defaults to 0. The
+ sbSEND_COMPLETED() and sbRECEIVE_COMPLETED() macros are invoked. To maintain
+ backwards compatibility, configUSE_SB_COMPLETED_CALLBACK defaults to 0. The
functionality is currently not supported for MPU enabled ports.
+ Generalize the FreeRTOS's Thread Local Storage (TLS) support so that it
is not tied to newlib and can be used with other c-runtime libraries also.
@@ -3052,6 +3188,3 @@
+ Prevent the call to kbhit() in main.c for debug builds as the debugger
seems to have problems stepping over the call. This if for the PC port
only.
-
-
-
diff --git a/Source/README.md b/Source/README.md
index 52d78dd..dd79eee 100644
--- a/Source/README.md
+++ b/Source/README.md
@@ -1,5 +1,7 @@
+[](https://github.com/FreeRTOS/FreeRTOS-Kernel/actions/workflows/unit-tests.yml?query=branch%3Amain+event%3Apush+workflow%3A%22CMock+Unit+Tests%22++)
+[](https://codecov.io/gh/FreeRTOS/FreeRTOS-Kernel)
## Getting started
-This repository contains FreeRTOS kernel source/header files and kernel ports only. This repository is referenced as a submodule in [FreeRTOS/FreeRTOS](https://github.com/FreeRTOS/FreeRTOS) repository, which contains pre-configured demo application projects under ```FreeRTOS/Demo``` directory.
+This repository contains FreeRTOS kernel source/header files and kernel ports only. This repository is referenced as a submodule in [FreeRTOS/FreeRTOS](https://github.com/FreeRTOS/FreeRTOS) repository, which contains pre-configured demo application projects under ```FreeRTOS/Demo``` directory.
The easiest way to use FreeRTOS is to start with one of the pre-configured demo application projects. That way you will have the correct FreeRTOS source files included, and the correct include paths configured. Once a demo application is building and executing you can remove the demo application files, and start to add in your own application source files. See the [FreeRTOS Kernel Quick Start Guide](https://www.FreeRTOS.org/FreeRTOS-quick-start-guide.html) for detailed instructions and other useful links.
@@ -8,7 +10,53 @@
### Getting help
If you have any questions or need assistance troubleshooting your FreeRTOS project, we have an active community that can help on the [FreeRTOS Community Support Forum](https://forums.freertos.org).
-## Cloning this repository
+## To consume FreeRTOS-Kernel
+
+### Consume with CMake
+If using CMake, it is recommended to use this repository using FetchContent.
+Add the following into your project's main or a subdirectory's `CMakeLists.txt`:
+
+- Define the source and version/tag you want to use:
+
+```cmake
+FetchContent_Declare( freertos_kernel
+ GIT_REPOSITORY https://github.com/FreeRTOS/FreeRTOS-Kernel.git
+ GIT_TAG main #Note: Best practice to use specific git-hash or tagged version
+)
+```
+
+- Add a freertos_config library (typically an INTERFACE library) The following assumes the directory structure:
+ - `include/FreeRTOSConfig.h`
+```cmake
+add_library(freertos_config INTERFACE)
+
+target_include_directories(freertos_config SYSTEM
+INTERFACE
+ include
+)
+
+target_compile_definitions(freertos_config
+ INTERFACE
+ projCOVERAGE_TEST=0
+)
+```
+
+- Configure the FreeRTOS-Kernel and make it available
+ - this particular example supports a native and cross-compiled build option.
+
+```cmake
+set( FREERTOS_HEAP "4" CACHE STRING "" FORCE)
+# Select the native compile PORT
+set( FREERTOS_PORT "GCC_POSIX" CACHE STRING "" FORCE)
+# Select the cross-compile PORT
+if (CMAKE_CROSSCOMPILING)
+ set(FREERTOS_PORT "GCC_ARM_CA9" CACHE STRING "" FORCE)
+endif()
+
+FetchContent_MakeAvailable(freertos_kernel)
+```
+
+### Consuming stand-alone - Cloning this repository
To clone using HTTPS:
```
@@ -20,20 +68,33 @@
```
## Repository structure
-- The root of this repository contains the three files that are common to
-every port - list.c, queue.c and tasks.c. The kernel is contained within these
+- The root of this repository contains the three files that are common to
+every port - list.c, queue.c and tasks.c. The kernel is contained within these
three files. croutine.c implements the optional co-routine functionality - which
is normally only used on very memory limited systems.
-- The ```./portable``` directory contains the files that are specific to a particular microcontroller and/or compiler.
+- The ```./portable``` directory contains the files that are specific to a particular microcontroller and/or compiler.
See the readme file in the ```./portable``` directory for more information.
- The ```./include``` directory contains the real time kernel header files.
### Code Formatting
-FreeRTOS files are formatted using the "uncrustify" tool. The configuration file used by uncrustify can be found in the [FreeRTOS/FreeRTOS repository](https://github.com/FreeRTOS/FreeRTOS/blob/main/tools/uncrustify.cfg).
+FreeRTOS files are formatted using the "uncrustify" tool. The configuration file used by uncrustify can be found in the [.github/uncrustify.cfg](.github/uncrustify.cfg) file.
+
+### Line Endings
+Files checked into the FreeRTOS-Kernel repository use unix-style LF line endings for the best compatibility with git.
+
+For optimal compatibility with Microsoft Windows tools, it is best to enable the git autocrlf feature. You can enable this setting for the current repository using the following command:
+```
+git config core.autocrlf true
+```
+
+### Git History Optimizations
+Some commits in this repository perform large refactors which touch many lines and lead to unwanted behavior when using the `git blame` command. You can configure git to ignore the list of large refactor commits in this repository with the following command:
+```
+git config blame.ignoreRevsFile .git-blame-ignore-revs
+```
### Spelling
-*lexicon.txt* contains words that are not traditionally found in an English dictionary. It is used by the spellchecker to verify the various jargon, variable names, and other odd words used in the FreeRTOS code base. If your pull request fails to pass the spelling and you believe this is a mistake, then add the word to *lexicon.txt*.
+*lexicon.txt* contains words that are not traditionally found in an English dictionary. It is used by the spellchecker to verify the various jargon, variable names, and other odd words used in the FreeRTOS code base. If your pull request fails to pass the spelling and you believe this is a mistake, then add the word to *lexicon.txt*.
Note that only the FreeRTOS Kernel source files are checked for proper spelling, the portable section is ignored.
-
diff --git a/Source/croutine.c b/Source/croutine.c
index aa5ea6f..559276c 100644
--- a/Source/croutine.c
+++ b/Source/croutine.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -66,13 +66,13 @@
* used from within an ISR.
*/
#define prvAddCoRoutineToReadyQueue( pxCRCB ) \
- { \
+ do { \
if( ( pxCRCB )->uxPriority > uxTopCoRoutineReadyPriority ) \
{ \
uxTopCoRoutineReadyPriority = ( pxCRCB )->uxPriority; \
} \
vListInsertEnd( ( List_t * ) &( pxReadyCoRoutineLists[ ( pxCRCB )->uxPriority ] ), &( ( pxCRCB )->xGenericListItem ) ); \
- }
+ } while( 0 )
/*
* Utility to ready all the lists used by the scheduler. This is called
diff --git a/Source/event_groups.c b/Source/event_groups.c
index f3e6aff..e337f13 100644
--- a/Source/event_groups.c
+++ b/Source/event_groups.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -46,32 +46,17 @@
* correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021 See comment above. */
-/* The following bit fields convey control information in a task's event list
- * item value. It is important they don't clash with the
- * taskEVENT_LIST_ITEM_VALUE_IN_USE definition. */
-#if configUSE_16_BIT_TICKS == 1
- #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x0100U
- #define eventUNBLOCKED_DUE_TO_BIT_SET 0x0200U
- #define eventWAIT_FOR_ALL_BITS 0x0400U
- #define eventEVENT_BITS_CONTROL_BYTES 0xff00U
-#else
- #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x01000000UL
- #define eventUNBLOCKED_DUE_TO_BIT_SET 0x02000000UL
- #define eventWAIT_FOR_ALL_BITS 0x04000000UL
- #define eventEVENT_BITS_CONTROL_BYTES 0xff000000UL
-#endif
-
typedef struct EventGroupDef_t
{
EventBits_t uxEventBits;
- List_t xTasksWaitingForBits; /*< List of tasks waiting for a bit to be set. */
+ List_t xTasksWaitingForBits; /**< List of tasks waiting for a bit to be set. */
#if ( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxEventGroupNumber;
#endif
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
- uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
+ uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
#endif
} EventGroup_t;
@@ -672,6 +657,42 @@
}
/*-----------------------------------------------------------*/
+#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+ BaseType_t xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup,
+ StaticEventGroup_t ** ppxEventGroupBuffer )
+ {
+ BaseType_t xReturn;
+ EventGroup_t * pxEventBits = xEventGroup;
+
+ configASSERT( pxEventBits );
+ configASSERT( ppxEventGroupBuffer );
+
+ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+ {
+ /* Check if the event group was statically allocated. */
+ if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdTRUE )
+ {
+ *ppxEventGroupBuffer = ( StaticEventGroup_t * ) pxEventBits;
+ xReturn = pdTRUE;
+ }
+ else
+ {
+ xReturn = pdFALSE;
+ }
+ }
+ #else /* configSUPPORT_DYNAMIC_ALLOCATION */
+ {
+ /* Event group must have been statically allocated. */
+ *ppxEventGroupBuffer = ( StaticEventGroup_t * ) pxEventBits;
+ xReturn = pdTRUE;
+ }
+ #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+
+ return xReturn;
+ }
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
/* For internal use only - execute a 'set bits' command that was pended from
* an interrupt. */
void vEventGroupSetBitsCallback( void * pvEventGroup,
diff --git a/Source/include/FreeRTOS.h b/Source/include/FreeRTOS.h
index d829d44..4a19f93 100644
--- a/Source/include/FreeRTOS.h
+++ b/Source/include/FreeRTOS.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -55,9 +55,42 @@
#endif
/* *INDENT-ON* */
+/* Acceptable values for configTICK_TYPE_WIDTH_IN_BITS. */
+#define TICK_TYPE_WIDTH_16_BITS 0
+#define TICK_TYPE_WIDTH_32_BITS 1
+#define TICK_TYPE_WIDTH_64_BITS 2
+
/* Application specific configuration options. */
#include "FreeRTOSConfig.h"
+#if !defined( configUSE_16_BIT_TICKS ) && !defined( configTICK_TYPE_WIDTH_IN_BITS )
+ #error Missing definition: One of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details.
+#endif
+
+#if defined( configUSE_16_BIT_TICKS ) && defined( configTICK_TYPE_WIDTH_IN_BITS )
+ #error Only one of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details.
+#endif
+
+/* Define configTICK_TYPE_WIDTH_IN_BITS according to the
+ * value of configUSE_16_BIT_TICKS for backward compatibility. */
+#ifndef configTICK_TYPE_WIDTH_IN_BITS
+ #if ( configUSE_16_BIT_TICKS == 1 )
+ #define configTICK_TYPE_WIDTH_IN_BITS TICK_TYPE_WIDTH_16_BITS
+ #else
+ #define configTICK_TYPE_WIDTH_IN_BITS TICK_TYPE_WIDTH_32_BITS
+ #endif
+#endif
+
+/* Set configUSE_MPU_WRAPPERS_V1 to 1 to use MPU wrappers v1. */
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* Set configENABLE_ACCESS_CONTROL_LIST to 1 to enable access control list support. */
+#ifndef configENABLE_ACCESS_CONTROL_LIST
+ #define configENABLE_ACCESS_CONTROL_LIST 0
+#endif
+
/* Basic FreeRTOS definitions. */
#include "projdefs.h"
@@ -72,41 +105,26 @@
/* Required if struct _reent is used. */
#if ( configUSE_NEWLIB_REENTRANT == 1 )
-/* Note Newlib support has been included by popular demand, but is not
- * used by the FreeRTOS maintainers themselves. FreeRTOS is not
- * responsible for resulting newlib operation. User must be familiar with
- * newlib and must provide system-wide implementations of the necessary
- * stubs. Be warned that (at the time of writing) the current newlib design
- * implements a system-wide malloc() that must be provided with locks.
- *
- * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
- * for additional information. */
- #include <reent.h>
+ #include "newlib-freertos.h"
- #define configUSE_C_RUNTIME_TLS_SUPPORT 1
-
- #ifndef configTLS_BLOCK_TYPE
- #define configTLS_BLOCK_TYPE struct _reent
- #endif
-
- #ifndef configINIT_TLS_BLOCK
- #define configINIT_TLS_BLOCK( xTLSBlock ) _REENT_INIT_PTR( &( xTLSBlock ) )
- #endif
-
- #ifndef configSET_TLS_BLOCK
- #define configSET_TLS_BLOCK( xTLSBlock ) _impure_ptr = &( xTLSBlock )
- #endif
-
- #ifndef configDEINIT_TLS_BLOCK
- #define configDEINIT_TLS_BLOCK( xTLSBlock ) _reclaim_reent( &( xTLSBlock ) )
- #endif
#endif /* if ( configUSE_NEWLIB_REENTRANT == 1 ) */
+/* Must be defaulted before configUSE_PICOLIBC_TLS is used below. */
+#ifndef configUSE_PICOLIBC_TLS
+ #define configUSE_PICOLIBC_TLS 0
+#endif
+
+#if ( configUSE_PICOLIBC_TLS == 1 )
+
+ #include "picolibc-freertos.h"
+
+#endif /* if ( configUSE_PICOLIBC_TLS == 1 ) */
+
#ifndef configUSE_C_RUNTIME_TLS_SUPPORT
#define configUSE_C_RUNTIME_TLS_SUPPORT 0
#endif
-#if ( ( configUSE_NEWLIB_REENTRANT == 0 ) && ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
+#if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
#ifndef configTLS_BLOCK_TYPE
#error Missing definition: configTLS_BLOCK_TYPE must be defined in FreeRTOSConfig.h when configUSE_C_RUNTIME_TLS_SUPPORT is set to 1.
@@ -123,7 +141,7 @@
#ifndef configDEINIT_TLS_BLOCK
#error Missing definition: configDEINIT_TLS_BLOCK must be defined in FreeRTOSConfig.h when configUSE_C_RUNTIME_TLS_SUPPORT is set to 1.
#endif
-#endif /* if ( ( configUSE_NEWLIB_REENTRANT == 0 ) && ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) */
+#endif /* if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) */
/*
* Check all the required application specific macros have been defined.
@@ -155,8 +173,10 @@
#error Missing definition: configUSE_TICK_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details.
#endif
-#ifndef configUSE_16_BIT_TICKS
- #error Missing definition: configUSE_16_BIT_TICKS must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details.
+#if ( ( configTICK_TYPE_WIDTH_IN_BITS != TICK_TYPE_WIDTH_16_BITS ) && \
+ ( configTICK_TYPE_WIDTH_IN_BITS != TICK_TYPE_WIDTH_32_BITS ) && \
+ ( configTICK_TYPE_WIDTH_IN_BITS != TICK_TYPE_WIDTH_64_BITS ) )
+ #error Macro configTICK_TYPE_WIDTH_IN_BITS is defined to incorrect value. See the Configuration section of the FreeRTOS API documentation for details.
#endif
#ifndef configUSE_CO_ROUTINES
@@ -1284,7 +1304,7 @@
#if ( configGENERATE_RUN_TIME_STATS == 1 )
configRUN_TIME_COUNTER_TYPE ulDummy16;
#endif
- #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
+ #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
configTLS_BLOCK_TYPE xDummy17;
#endif
#if ( configUSE_TASK_NOTIFICATIONS == 1 )
diff --git a/Source/include/StackMacros.h b/Source/include/StackMacros.h
index 099ac0c..006068c 100644
--- a/Source/include/StackMacros.h
+++ b/Source/include/StackMacros.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/include/atomic.h b/Source/include/atomic.h
index 8e356e1..033edce 100644
--- a/Source/include/atomic.h
+++ b/Source/include/atomic.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/include/croutine.h b/Source/include/croutine.h
index 48e6f03..df50f87 100644
--- a/Source/include/croutine.h
+++ b/Source/include/croutine.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -53,11 +53,11 @@
typedef struct corCoRoutineControlBlock
{
crCOROUTINE_CODE pxCoRoutineFunction;
- ListItem_t xGenericListItem; /*< List item used to place the CRCB in ready and blocked queues. */
- ListItem_t xEventListItem; /*< List item used to place the CRCB in event lists. */
- UBaseType_t uxPriority; /*< The priority of the co-routine in relation to other co-routines. */
- UBaseType_t uxIndex; /*< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */
- uint16_t uxState; /*< Used internally by the co-routine implementation. */
+ ListItem_t xGenericListItem; /**< List item used to place the CRCB in ready and blocked queues. */
+ ListItem_t xEventListItem; /**< List item used to place the CRCB in event lists. */
+ UBaseType_t uxPriority; /**< The priority of the co-routine in relation to other co-routines. */
+ UBaseType_t uxIndex; /**< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */
+ uint16_t uxState; /**< Used internally by the co-routine implementation. */
} CRCB_t; /* Co-routine control block. Note must be identical in size down to uxPriority with TCB_t. */
/**
@@ -307,12 +307,14 @@
* \defgroup crDELAY crDELAY
* \ingroup Tasks
*/
-#define crDELAY( xHandle, xTicksToDelay ) \
- if( ( xTicksToDelay ) > 0 ) \
- { \
- vCoRoutineAddToDelayedList( ( xTicksToDelay ), NULL ); \
- } \
- crSET_STATE0( ( xHandle ) );
+#define crDELAY( xHandle, xTicksToDelay ) \
+ do { \
+ if( ( xTicksToDelay ) > 0 ) \
+ { \
+ vCoRoutineAddToDelayedList( ( xTicksToDelay ), NULL ); \
+ } \
+ crSET_STATE0( ( xHandle ) ); \
+ } while( 0 )
/**
* @code{c}
@@ -400,7 +402,7 @@
* \ingroup Tasks
*/
#define crQUEUE_SEND( xHandle, pxQueue, pvItemToQueue, xTicksToWait, pxResult ) \
- { \
+ do { \
*( pxResult ) = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), ( xTicksToWait ) ); \
if( *( pxResult ) == errQUEUE_BLOCKED ) \
{ \
@@ -412,7 +414,7 @@
crSET_STATE1( ( xHandle ) ); \
*pxResult = pdPASS; \
} \
- }
+ } while( 0 )
/**
* croutine. h
@@ -494,7 +496,7 @@
* \ingroup Tasks
*/
#define crQUEUE_RECEIVE( xHandle, pxQueue, pvBuffer, xTicksToWait, pxResult ) \
- { \
+ do { \
*( pxResult ) = xQueueCRReceive( ( pxQueue ), ( pvBuffer ), ( xTicksToWait ) ); \
if( *( pxResult ) == errQUEUE_BLOCKED ) \
{ \
@@ -506,7 +508,7 @@
crSET_STATE1( ( xHandle ) ); \
*( pxResult ) = pdPASS; \
} \
- }
+ } while( 0 )
/**
* croutine. h
diff --git a/Source/include/deprecated_definitions.h b/Source/include/deprecated_definitions.h
index 1cb9372..6a25d06 100644
--- a/Source/include/deprecated_definitions.h
+++ b/Source/include/deprecated_definitions.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/include/event_groups.h b/Source/include/event_groups.h
index 275f316..a5c723d 100644
--- a/Source/include/event_groups.h
+++ b/Source/include/event_groups.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -36,6 +36,26 @@
/* FreeRTOS includes. */
#include "timers.h"
+/* The following bit fields convey control information in a task's event list
+ * item value. It is important they don't clash with the
+ * taskEVENT_LIST_ITEM_VALUE_IN_USE definition. */
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x0100U
+ #define eventUNBLOCKED_DUE_TO_BIT_SET 0x0200U
+ #define eventWAIT_FOR_ALL_BITS 0x0400U
+ #define eventEVENT_BITS_CONTROL_BYTES 0xff00U
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x01000000UL
+ #define eventUNBLOCKED_DUE_TO_BIT_SET 0x02000000UL
+ #define eventWAIT_FOR_ALL_BITS 0x04000000UL
+ #define eventEVENT_BITS_CONTROL_BYTES 0xff000000UL
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS )
+ #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x0100000000000000ULL
+ #define eventUNBLOCKED_DUE_TO_BIT_SET 0x0200000000000000ULL
+ #define eventWAIT_FOR_ALL_BITS 0x0400000000000000ULL
+ #define eventEVENT_BITS_CONTROL_BYTES 0xff00000000000000ULL
+#endif /* if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) */
+
/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
@@ -84,8 +104,8 @@
/*
* The type that holds event bits always matches TickType_t - therefore the
- * number of bits it holds is set by configUSE_16_BIT_TICKS (16 bits if set to 1,
- * 32 bits if set to 0.
+ * number of bits it holds is set by configTICK_TYPE_WIDTH_IN_BITS (16 bits if set to 0,
+ * 32 bits if set to 1, 64 bits if set to 2).
*
* \defgroup EventBits_t EventBits_t
* \ingroup EventGroup
@@ -112,11 +132,12 @@
*
* Although event groups are not related to ticks, for internal implementation
* reasons the number of bits available for use in an event group is dependent
- * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If
- * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit
- * 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has
- * 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store
- * event bits within an event group.
+ * on the configTICK_TYPE_WIDTH_IN_BITS setting in FreeRTOSConfig.h. If
+ * configTICK_TYPE_WIDTH_IN_BITS is 0 then each event group contains 8 usable bits (bit
+ * 0 to bit 7). If configTICK_TYPE_WIDTH_IN_BITS is set to 1 then each event group has
+ * 24 usable bits (bit 0 to bit 23). If configTICK_TYPE_WIDTH_IN_BITS is set to 2 then
+ * each event group has 56 usable bits (bit 0 to bit 55). The EventBits_t type
+ * is used to store event bits within an event group.
*
* @return If the event group was created then a handle to the event group is
* returned. If there was insufficient FreeRTOS heap available to create the
@@ -168,11 +189,12 @@
*
* Although event groups are not related to ticks, for internal implementation
* reasons the number of bits available for use in an event group is dependent
- * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If
- * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit
- * 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has
- * 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store
- * event bits within an event group.
+ * on the configTICK_TYPE_WIDTH_IN_BITS setting in FreeRTOSConfig.h. If
+ * configTICK_TYPE_WIDTH_IN_BITS is 0 then each event group contains 8 usable bits (bit
+ * 0 to bit 7). If configTICK_TYPE_WIDTH_IN_BITS is set to 1 then each event group has
+ * 24 usable bits (bit 0 to bit 23). If configTICK_TYPE_WIDTH_IN_BITS is set to 2 then
+ * each event group has 56 usable bits (bit 0 to bit 55). The EventBits_t type
+ * is used to store event bits within an event group.
*
* @param pxEventGroupBuffer pxEventGroupBuffer must point to a variable of type
* StaticEventGroup_t, which will be then be used to hold the event group's data
@@ -761,6 +783,28 @@
*/
void vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION;
+/**
+ * event_groups.h
+ * @code{c}
+ * BaseType_t xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup,
+ * StaticEventGroup_t ** ppxEventGroupBuffer );
+ * @endcode
+ *
+ * Retrieve a pointer to a statically created event group's data structure
+ * buffer. It is the same buffer that is supplied at the time of creation.
+ *
+ * @param xEventGroup The event group for which to retrieve the buffer.
+ *
+ * @param ppxEventGroupBuffer Used to return a pointer to the event group's
+ * data structure buffer.
+ *
+ * @return pdTRUE if the buffer was retrieved, pdFALSE otherwise.
+ */
+#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+ BaseType_t xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup,
+ StaticEventGroup_t ** ppxEventGroupBuffer ) PRIVILEGED_FUNCTION;
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
/* For internal use only. */
void vEventGroupSetBitsCallback( void * pvEventGroup,
const uint32_t ulBitsToSet ) PRIVILEGED_FUNCTION;
diff --git a/Source/include/list.h b/Source/include/list.h
index 35e4789..62c6238 100644
--- a/Source/include/list.h
+++ b/Source/include/list.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -143,20 +143,20 @@
struct xLIST;
struct xLIST_ITEM
{
- listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
- configLIST_VOLATILE TickType_t xItemValue; /*< The value being listed. In most cases this is used to sort the list in ascending order. */
- struct xLIST_ITEM * configLIST_VOLATILE pxNext; /*< Pointer to the next ListItem_t in the list. */
- struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /*< Pointer to the previous ListItem_t in the list. */
- void * pvOwner; /*< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */
- struct xLIST * configLIST_VOLATILE pxContainer; /*< Pointer to the list in which this list item is placed (if any). */
- listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
+ listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
+ configLIST_VOLATILE TickType_t xItemValue; /**< The value being listed. In most cases this is used to sort the list in ascending order. */
+ struct xLIST_ITEM * configLIST_VOLATILE pxNext; /**< Pointer to the next ListItem_t in the list. */
+ struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /**< Pointer to the previous ListItem_t in the list. */
+ void * pvOwner; /**< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */
+ struct xLIST * configLIST_VOLATILE pxContainer; /**< Pointer to the list in which this list item is placed (if any). */
+ listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
};
typedef struct xLIST_ITEM ListItem_t; /* For some reason lint wants this as two separate definitions. */
#if ( configUSE_MINI_LIST_ITEM == 1 )
struct xMINI_LIST_ITEM
{
- listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
+ listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
configLIST_VOLATILE TickType_t xItemValue;
struct xLIST_ITEM * configLIST_VOLATILE pxNext;
struct xLIST_ITEM * configLIST_VOLATILE pxPrevious;
@@ -171,11 +171,11 @@
*/
typedef struct xLIST
{
- listFIRST_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
+ listFIRST_LIST_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
volatile UBaseType_t uxNumberOfItems;
- ListItem_t * configLIST_VOLATILE pxIndex; /*< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */
- MiniListItem_t xListEnd; /*< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */
- listSECOND_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
+ ListItem_t * configLIST_VOLATILE pxIndex; /**< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */
+ MiniListItem_t xListEnd; /**< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */
+ listSECOND_LIST_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
} List_t;
/*
@@ -283,17 +283,17 @@
* \ingroup LinkedList
*/
#define listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxList ) \
- { \
+ do { \
List_t * const pxConstList = ( pxList ); \
/* Increment the index to the next item and return the item, ensuring */ \
/* we don't return the marker used at the end of the list. */ \
( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \
if( ( void * ) ( pxConstList )->pxIndex == ( void * ) &( ( pxConstList )->xListEnd ) ) \
{ \
- ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \
+ ( pxConstList )->pxIndex = ( pxConstList )->xListEnd.pxNext; \
} \
( pxTCB ) = ( pxConstList )->pxIndex->pvOwner; \
- }
+ } while( 0 )
/*
* Version of uxListRemove() that does not return a value. Provided as a slight
@@ -312,7 +312,7 @@
* \ingroup LinkedList
*/
#define listREMOVE_ITEM( pxItemToRemove ) \
- { \
+ do { \
/* The list item knows which list it is in. Obtain the list from the list \
* item. */ \
List_t * const pxList = ( pxItemToRemove )->pxContainer; \
@@ -327,7 +327,7 @@
\
( pxItemToRemove )->pxContainer = NULL; \
( pxList->uxNumberOfItems )--; \
- }
+ } while( 0 )
/*
* Inline version of vListInsertEnd() to provide slight optimisation for
@@ -352,7 +352,7 @@
* \ingroup LinkedList
*/
#define listINSERT_END( pxList, pxNewListItem ) \
- { \
+ do { \
ListItem_t * const pxIndex = ( pxList )->pxIndex; \
\
/* Only effective when configASSERT() is also defined, these tests may catch \
@@ -374,7 +374,7 @@
( pxNewListItem )->pxContainer = ( pxList ); \
\
( ( pxList )->uxNumberOfItems )++; \
- }
+ } while( 0 )
/*
* Access function to obtain the owner of the first entry in a list. Lists
diff --git a/Source/include/message_buffer.h b/Source/include/message_buffer.h
index bb8a7f7..136445d 100644
--- a/Source/include/message_buffer.h
+++ b/Source/include/message_buffer.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -249,6 +249,37 @@
* message_buffer.h
*
* @code{c}
+ * BaseType_t xMessageBufferGetStaticBuffers( MessageBufferHandle_t xMessageBuffer,
+ * uint8_t ** ppucMessageBufferStorageArea,
+ * StaticMessageBuffer_t ** ppxStaticMessageBuffer );
+ * @endcode
+ *
+ * Retrieve pointers to a statically created message buffer's data structure
+ * buffer and storage area buffer. These are the same buffers that are supplied
+ * at the time of creation.
+ *
+ * @param xMessageBuffer The message buffer for which to retrieve the buffers.
+ *
+ * @param ppucMessageBufferStorageArea Used to return a pointer to the
+ * message buffer's storage area buffer.
+ *
+ * @param ppxStaticMessageBuffer Used to return a pointer to the message
+ * buffer's data structure buffer.
+ *
+ * @return pdTRUE if buffers were retrieved, pdFALSE otherwise.
+ *
+ * \defgroup xMessageBufferGetStaticBuffers xMessageBufferGetStaticBuffers
+ * \ingroup MessageBufferManagement
+ */
+#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+ #define xMessageBufferGetStaticBuffers( xMessageBuffer, ppucMessageBufferStorageArea, ppxStaticMessageBuffer ) \
+ xStreamBufferGetStaticBuffers( ( xMessageBuffer ), ( ppucMessageBufferStorageArea ), ( ppxStaticMessageBuffer ) )
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * message_buffer.h
+ *
+ * @code{c}
* size_t xMessageBufferSend( MessageBufferHandle_t xMessageBuffer,
* const void *pvTxData,
* size_t xDataLengthBytes,
diff --git a/Source/include/mpu_prototypes.h b/Source/include/mpu_prototypes.h
index 933794c..574f1da 100644
--- a/Source/include/mpu_prototypes.h
+++ b/Source/include/mpu_prototypes.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -38,21 +38,43 @@
#ifndef MPU_PROTOTYPES_H
#define MPU_PROTOTYPES_H
+typedef struct xTaskGenericNotifyParams
+{
+ TaskHandle_t xTaskToNotify;
+ UBaseType_t uxIndexToNotify;
+ uint32_t ulValue;
+ eNotifyAction eAction;
+ uint32_t * pulPreviousNotificationValue;
+} xTaskGenericNotifyParams_t;
+
+typedef struct xTaskGenericNotifyWaitParams
+{
+ UBaseType_t uxIndexToWaitOn;
+ uint32_t ulBitsToClearOnEntry;
+ uint32_t ulBitsToClearOnExit;
+ uint32_t * pulNotificationValue;
+ TickType_t xTicksToWait;
+} xTaskGenericNotifyWaitParams_t;
+
+typedef struct xTimerGenericCommandParams
+{
+ TimerHandle_t xTimer;
+ BaseType_t xCommandID;
+ TickType_t xOptionalValue;
+ BaseType_t * pxHigherPriorityTaskWoken;
+ TickType_t xTicksToWait;
+} xTimerGenericCommandParams_t;
+
+typedef struct xEventGroupWaitBitsParams
+{
+ EventGroupHandle_t xEventGroup;
+ EventBits_t uxBitsToWaitFor;
+ BaseType_t xClearOnExit;
+ BaseType_t xWaitForAllBits;
+ TickType_t xTicksToWait;
+} xEventGroupWaitBitsParams_t;
+
/* MPU versions of task.h API functions. */
-BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode,
- const char * const pcName,
- const uint16_t usStackDepth,
- void * const pvParameters,
- UBaseType_t uxPriority,
- TaskHandle_t * const pxCreatedTask ) FREERTOS_SYSTEM_CALL;
-TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode,
- const char * const pcName,
- const uint32_t ulStackDepth,
- void * const pvParameters,
- UBaseType_t uxPriority,
- StackType_t * const puxStackBuffer,
- StaticTask_t * const pxTaskBuffer ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
const TickType_t xTimeIncrement ) FREERTOS_SYSTEM_CALL;
@@ -63,17 +85,11 @@
TaskStatus_t * pxTaskStatus,
BaseType_t xGetFreeStackSpace,
eTaskState eState ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskPrioritySet( TaskHandle_t xTask,
- UBaseType_t uxNewPriority ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskResume( TaskHandle_t xTaskToResume ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskStartScheduler( void ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskSuspendAll( void ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xTaskResumeAll( void ) FREERTOS_SYSTEM_CALL;
TickType_t MPU_xTaskGetTickCount( void ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) FREERTOS_SYSTEM_CALL;
char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) FREERTOS_SYSTEM_CALL;
-TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
@@ -84,26 +100,26 @@
void * pvValue ) FREERTOS_SYSTEM_CALL;
void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
BaseType_t xIndex ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask,
- void * pvParameter ) FREERTOS_SYSTEM_CALL;
TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
const UBaseType_t uxArraySize,
configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) FREERTOS_SYSTEM_CALL;
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) FREERTOS_SYSTEM_CALL;
configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskList( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskGetRunTimeStats( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
UBaseType_t uxIndexToNotify,
uint32_t ulValue,
eNotifyAction eAction,
uint32_t * pulPreviousNotificationValue ) FREERTOS_SYSTEM_CALL;
+BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
uint32_t ulBitsToClearOnEntry,
uint32_t ulBitsToClearOnExit,
uint32_t * pulNotificationValue,
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) FREERTOS_SYSTEM_CALL;
uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
BaseType_t xClearCountOnExit,
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
@@ -112,14 +128,87 @@
uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
UBaseType_t uxIndexToClear,
uint32_t ulBitsToClear ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xTaskIncrementTick( void ) FREERTOS_SYSTEM_CALL;
-TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
TickType_t * const pxTicksToWait ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskMissedYield( void ) FREERTOS_SYSTEM_CALL;
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskGetSchedulerState( void ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) FREERTOS_SYSTEM_CALL;
+
+/* Privileged only wrappers for Task APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+#if ( configUSE_MPU_WRAPPERS_V1 == 1 )
+
+ BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode,
+ const char * const pcName,
+ const uint16_t usStackDepth,
+ void * const pvParameters,
+ UBaseType_t uxPriority,
+ TaskHandle_t * const pxCreatedTask ) FREERTOS_SYSTEM_CALL;
+ TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode,
+ const char * const pcName,
+ const uint32_t ulStackDepth,
+ void * const pvParameters,
+ UBaseType_t uxPriority,
+ StackType_t * const puxStackBuffer,
+ StaticTask_t * const pxTaskBuffer ) FREERTOS_SYSTEM_CALL;
+ void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) FREERTOS_SYSTEM_CALL;
+ void MPU_vTaskPrioritySet( TaskHandle_t xTask,
+ UBaseType_t uxNewPriority ) FREERTOS_SYSTEM_CALL;
+ TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) FREERTOS_SYSTEM_CALL;
+ BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask,
+ void * pvParameter ) FREERTOS_SYSTEM_CALL;
+ void MPU_vTaskGetRunTimeStats( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL;
+ void MPU_vTaskList( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL;
+ void MPU_vTaskSuspendAll( void ) FREERTOS_SYSTEM_CALL;
+ BaseType_t MPU_xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) FREERTOS_SYSTEM_CALL;
+ BaseType_t MPU_xTaskResumeAll( void ) FREERTOS_SYSTEM_CALL;
+
+#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+ BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode,
+ const char * const pcName,
+ const uint16_t usStackDepth,
+ void * const pvParameters,
+ UBaseType_t uxPriority,
+ TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
+ TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode,
+ const char * const pcName,
+ const uint32_t ulStackDepth,
+ void * const pvParameters,
+ UBaseType_t uxPriority,
+ StackType_t * const puxStackBuffer,
+ StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION;
+ void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION;
+ void MPU_vTaskPrioritySet( TaskHandle_t xTask,
+ UBaseType_t uxNewPriority ) PRIVILEGED_FUNCTION;
+ TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) PRIVILEGED_FUNCTION;
+ BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask,
+ void * pvParameter ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
+ TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition,
+ TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION;
+void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify,
+ const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xTaskGetStaticBuffers( TaskHandle_t xTask,
+ StackType_t ** ppuxStackBuffer,
+ StaticTask_t ** ppxTaskBuffer ) PRIVILEGED_FUNCTION;
+UBaseType_t MPU_uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue,
+ BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+void MPU_vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
/* MPU versions of queue.h API functions. */
BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
@@ -136,15 +225,6 @@
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
-void MPU_vQueueDelete( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
-QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL;
-QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType,
- StaticQueue_t * pxStaticQueue ) FREERTOS_SYSTEM_CALL;
-QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
- const UBaseType_t uxInitialCount ) FREERTOS_SYSTEM_CALL;
-QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
- const UBaseType_t uxInitialCount,
- StaticQueue_t * pxStaticQueue ) FREERTOS_SYSTEM_CALL;
TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
@@ -153,70 +233,134 @@
const char * pcName ) FREERTOS_SYSTEM_CALL;
void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
-QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength,
- const UBaseType_t uxItemSize,
- const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL;
-QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
- const UBaseType_t uxItemSize,
- uint8_t * pucQueueStorage,
- StaticQueue_t * pxStaticQueue,
- const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL;
-QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
- QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL;
QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue,
- BaseType_t xNewQueue ) FREERTOS_SYSTEM_CALL;
void MPU_vQueueSetQueueNumber( QueueHandle_t xQueue,
UBaseType_t uxQueueNumber ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxQueueGetQueueNumber( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
uint8_t MPU_ucQueueGetQueueType( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
+/* Privileged only wrappers for Queue APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+#if ( configUSE_MPU_WRAPPERS_V1 == 1 )
+
+ void MPU_vQueueDelete( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
+ QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL;
+ QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType,
+ StaticQueue_t * pxStaticQueue ) FREERTOS_SYSTEM_CALL;
+ QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
+ const UBaseType_t uxInitialCount ) FREERTOS_SYSTEM_CALL;
+ QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
+ const UBaseType_t uxInitialCount,
+ StaticQueue_t * pxStaticQueue ) FREERTOS_SYSTEM_CALL;
+ QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength,
+ const UBaseType_t uxItemSize,
+ const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL;
+ QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
+ const UBaseType_t uxItemSize,
+ uint8_t * pucQueueStorage,
+ StaticQueue_t * pxStaticQueue,
+ const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL;
+ QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) FREERTOS_SYSTEM_CALL;
+ BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL;
+ BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue,
+ BaseType_t xNewQueue ) FREERTOS_SYSTEM_CALL;
+
+#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+ void MPU_vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+ QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) PRIVILEGED_FUNCTION;
+ QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType,
+ StaticQueue_t * pxStaticQueue ) PRIVILEGED_FUNCTION;
+ QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
+ const UBaseType_t uxInitialCount ) PRIVILEGED_FUNCTION;
+ QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
+ const UBaseType_t uxInitialCount,
+ StaticQueue_t * pxStaticQueue ) PRIVILEGED_FUNCTION;
+ QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength,
+ const UBaseType_t uxItemSize,
+ const uint8_t ucQueueType ) PRIVILEGED_FUNCTION;
+ QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
+ const UBaseType_t uxItemSize,
+ uint8_t * pucQueueStorage,
+ StaticQueue_t * pxStaticQueue,
+ const uint8_t ucQueueType ) PRIVILEGED_FUNCTION;
+ QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) PRIVILEGED_FUNCTION;
+ BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
+ BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue,
+ BaseType_t xNewQueue ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+BaseType_t MPU_xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
+ uint8_t ** ppucQueueStorage,
+ StaticQueue_t ** ppxStaticQueue ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueGenericSendFromISR( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueGiveFromISR( QueueHandle_t xQueue,
+ BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueuePeekFromISR( QueueHandle_t xQueue,
+ void * const pvBuffer ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueReceiveFromISR( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+UBaseType_t MPU_uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+TaskHandle_t MPU_xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION;
+QueueSetMemberHandle_t MPU_xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
+
/* MPU versions of timers.h API functions. */
-TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName,
- const TickType_t xTimerPeriodInTicks,
- const UBaseType_t uxAutoReload,
- void * const pvTimerID,
- TimerCallbackFunction_t pxCallbackFunction ) FREERTOS_SYSTEM_CALL;
-TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName,
- const TickType_t xTimerPeriodInTicks,
- const UBaseType_t uxAutoReload,
- void * const pvTimerID,
- TimerCallbackFunction_t pxCallbackFunction,
- StaticTimer_t * pxTimerBuffer ) FREERTOS_SYSTEM_CALL;
void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
void * pvNewID ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xTimerPendFunctionCall( PendedFunction_t xFunctionToPend,
- void * pvParameter1,
- uint32_t ulParameter2,
- TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
-const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
-void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
- const UBaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL;
-UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
-TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
-TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xTimerCreateTimerTask( void ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
const BaseType_t xCommandID,
const TickType_t xOptionalValue,
BaseType_t * const pxHigherPriorityTaskWoken,
const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) FREERTOS_SYSTEM_CALL;
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL;
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+/* Privileged only wrappers for Timer APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName,
+ const TickType_t xTimerPeriodInTicks,
+ const UBaseType_t uxAutoReload,
+ void * const pvTimerID,
+ TimerCallbackFunction_t pxCallbackFunction ) PRIVILEGED_FUNCTION;
+TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName,
+ const TickType_t xTimerPeriodInTicks,
+ const UBaseType_t uxAutoReload,
+ void * const pvTimerID,
+ TimerCallbackFunction_t pxCallbackFunction,
+ StaticTimer_t * pxTimerBuffer ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xTimerGetStaticBuffer( TimerHandle_t xTimer,
+ StaticTimer_t ** ppxTimerBuffer ) PRIVILEGED_FUNCTION;
/* MPU versions of event_group.h API functions. */
-EventGroupHandle_t MPU_xEventGroupCreate( void ) FREERTOS_SYSTEM_CALL;
-EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) FREERTOS_SYSTEM_CALL;
EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
const EventBits_t uxBitsToWaitFor,
const BaseType_t xClearOnExit,
const BaseType_t xWaitForAllBits,
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) FREERTOS_SYSTEM_CALL;
EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
const EventBits_t uxBitsToClear ) FREERTOS_SYSTEM_CALL;
EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
@@ -225,8 +369,37 @@
const EventBits_t uxBitsToSet,
const EventBits_t uxBitsToWaitFor,
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
-void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) FREERTOS_SYSTEM_CALL;
-UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) FREERTOS_SYSTEM_CALL;
+#if ( configUSE_TRACE_FACILITY == 1 )
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) FREERTOS_SYSTEM_CALL;
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) FREERTOS_SYSTEM_CALL;
+#endif /* #if ( configUSE_TRACE_FACILITY == 1 ) */
+
+/* Privileged only wrappers for Event Group APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+#if ( configUSE_MPU_WRAPPERS_V1 == 1 )
+
+ EventGroupHandle_t MPU_xEventGroupCreate( void ) FREERTOS_SYSTEM_CALL;
+ EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) FREERTOS_SYSTEM_CALL;
+ void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) FREERTOS_SYSTEM_CALL;
+
+#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+ EventGroupHandle_t MPU_xEventGroupCreate( void ) PRIVILEGED_FUNCTION;
+ EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) PRIVILEGED_FUNCTION;
+ void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+BaseType_t MPU_xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup,
+ StaticEventGroup_t ** ppxEventGroupBuffer ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+EventBits_t MPU_xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION;
/* MPU versions of message/stream_buffer.h API functions. */
size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
@@ -237,28 +410,67 @@
void * pvRxData,
size_t xBufferLengthBytes,
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
-size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
-void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL;
-StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes,
- size_t xTriggerLevelBytes,
- BaseType_t xIsMessageBuffer,
- StreamBufferCallbackFunction_t pxSendCompletedCallback,
- StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) FREERTOS_SYSTEM_CALL;
-StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes,
- size_t xTriggerLevelBytes,
- BaseType_t xIsMessageBuffer,
- uint8_t * const pucStreamBufferStorageArea,
- StaticStreamBuffer_t * const pxStaticStreamBuffer,
- StreamBufferCallbackFunction_t pxSendCompletedCallback,
- StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) FREERTOS_SYSTEM_CALL;
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+/* Privileged only wrappers for Stream Buffer APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+#if ( configUSE_MPU_WRAPPERS_V1 == 1 )
+ StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes,
+ size_t xTriggerLevelBytes,
+ BaseType_t xStreamBufferType,
+ StreamBufferCallbackFunction_t pxSendCompletedCallback,
+ StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) FREERTOS_SYSTEM_CALL;
+ StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes,
+ size_t xTriggerLevelBytes,
+ BaseType_t xStreamBufferType,
+ uint8_t * const pucStreamBufferStorageArea,
+ StaticStreamBuffer_t * const pxStaticStreamBuffer,
+ StreamBufferCallbackFunction_t pxSendCompletedCallback,
+ StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) FREERTOS_SYSTEM_CALL;
+ void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+ BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+
+#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+ StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes,
+ size_t xTriggerLevelBytes,
+ BaseType_t xStreamBufferType,
+ StreamBufferCallbackFunction_t pxSendCompletedCallback,
+ StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION;
+ StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes,
+ size_t xTriggerLevelBytes,
+ BaseType_t xStreamBufferType,
+ uint8_t * const pucStreamBufferStorageArea,
+ StaticStreamBuffer_t * const pxStaticStreamBuffer,
+ StreamBufferCallbackFunction_t pxSendCompletedCallback,
+ StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION;
+ void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
+ BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+BaseType_t MPU_xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffers,
+ uint8_t * ppucStreamBufferStorageArea,
+ StaticStreamBuffer_t * ppxStaticStreamBuffer ) PRIVILEGED_FUNCTION;
+size_t MPU_xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+size_t MPU_xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer,
+ BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer,
+ BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
#endif /* MPU_PROTOTYPES_H */
diff --git a/Source/include/mpu_syscall_numbers.h b/Source/include/mpu_syscall_numbers.h
new file mode 100644
index 0000000..03c3ebd
--- /dev/null
+++ b/Source/include/mpu_syscall_numbers.h
@@ -0,0 +1,106 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef MPU_SYSCALL_NUMBERS_H
+#define MPU_SYSCALL_NUMBERS_H
+
+/* Numbers assigned to various system calls. */
+#define SYSTEM_CALL_xTaskGenericNotify 0
+#define SYSTEM_CALL_xTaskGenericNotifyWait 1
+#define SYSTEM_CALL_xTimerGenericCommand 2
+#define SYSTEM_CALL_xEventGroupWaitBits 3
+#define SYSTEM_CALL_xTaskDelayUntil 4
+#define SYSTEM_CALL_xTaskAbortDelay 5
+#define SYSTEM_CALL_vTaskDelay 6
+#define SYSTEM_CALL_uxTaskPriorityGet 7
+#define SYSTEM_CALL_eTaskGetState 8
+#define SYSTEM_CALL_vTaskGetInfo 9
+#define SYSTEM_CALL_xTaskGetIdleTaskHandle 10
+#define SYSTEM_CALL_vTaskSuspend 11
+#define SYSTEM_CALL_vTaskResume 12
+#define SYSTEM_CALL_xTaskGetTickCount 13
+#define SYSTEM_CALL_uxTaskGetNumberOfTasks 14
+#define SYSTEM_CALL_pcTaskGetName 15
+#define SYSTEM_CALL_ulTaskGetRunTimeCounter 16
+#define SYSTEM_CALL_ulTaskGetRunTimePercent 17
+#define SYSTEM_CALL_ulTaskGetIdleRunTimePercent 18
+#define SYSTEM_CALL_ulTaskGetIdleRunTimeCounter 19
+#define SYSTEM_CALL_vTaskSetApplicationTaskTag 20
+#define SYSTEM_CALL_xTaskGetApplicationTaskTag 21
+#define SYSTEM_CALL_vTaskSetThreadLocalStoragePointer 22
+#define SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer 23
+#define SYSTEM_CALL_uxTaskGetSystemState 24
+#define SYSTEM_CALL_uxTaskGetStackHighWaterMark 25
+#define SYSTEM_CALL_uxTaskGetStackHighWaterMark2 26
+#define SYSTEM_CALL_xTaskGetCurrentTaskHandle 27
+#define SYSTEM_CALL_xTaskGetSchedulerState 28
+#define SYSTEM_CALL_vTaskSetTimeOutState 29
+#define SYSTEM_CALL_xTaskCheckForTimeOut 30
+#define SYSTEM_CALL_ulTaskGenericNotifyTake 31
+#define SYSTEM_CALL_xTaskGenericNotifyStateClear 32
+#define SYSTEM_CALL_ulTaskGenericNotifyValueClear 33
+#define SYSTEM_CALL_xQueueGenericSend 34
+#define SYSTEM_CALL_uxQueueMessagesWaiting 35
+#define SYSTEM_CALL_uxQueueSpacesAvailable 36
+#define SYSTEM_CALL_xQueueReceive 37
+#define SYSTEM_CALL_xQueuePeek 38
+#define SYSTEM_CALL_xQueueSemaphoreTake 39
+#define SYSTEM_CALL_xQueueGetMutexHolder 40
+#define SYSTEM_CALL_xQueueTakeMutexRecursive 41
+#define SYSTEM_CALL_xQueueGiveMutexRecursive 42
+#define SYSTEM_CALL_xQueueSelectFromSet 43
+#define SYSTEM_CALL_xQueueAddToSet 44
+#define SYSTEM_CALL_vQueueAddToRegistry 45
+#define SYSTEM_CALL_vQueueUnregisterQueue 46
+#define SYSTEM_CALL_pcQueueGetName 47
+#define SYSTEM_CALL_pvTimerGetTimerID 48
+#define SYSTEM_CALL_vTimerSetTimerID 49
+#define SYSTEM_CALL_xTimerIsTimerActive 50
+#define SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle 51
+#define SYSTEM_CALL_pcTimerGetName 52
+#define SYSTEM_CALL_vTimerSetReloadMode 53
+#define SYSTEM_CALL_xTimerGetReloadMode 54
+#define SYSTEM_CALL_uxTimerGetReloadMode 55
+#define SYSTEM_CALL_xTimerGetPeriod 56
+#define SYSTEM_CALL_xTimerGetExpiryTime 57
+#define SYSTEM_CALL_xEventGroupClearBits 58
+#define SYSTEM_CALL_xEventGroupSetBits 59
+#define SYSTEM_CALL_xEventGroupSync 60
+#define SYSTEM_CALL_uxEventGroupGetNumber 61
+#define SYSTEM_CALL_vEventGroupSetNumber 62
+#define SYSTEM_CALL_xStreamBufferSend 63
+#define SYSTEM_CALL_xStreamBufferReceive 64
+#define SYSTEM_CALL_xStreamBufferIsFull 65
+#define SYSTEM_CALL_xStreamBufferIsEmpty 66
+#define SYSTEM_CALL_xStreamBufferSpacesAvailable 67
+#define SYSTEM_CALL_xStreamBufferBytesAvailable 68
+#define SYSTEM_CALL_xStreamBufferSetTriggerLevel 69
+#define SYSTEM_CALL_xStreamBufferNextMessageLengthBytes 70
+#define NUM_SYSTEM_CALLS 71 /* Total number of system calls. */
+
+#endif /* MPU_SYSCALL_NUMBERS_H */
diff --git a/Source/include/mpu_wrappers.h b/Source/include/mpu_wrappers.h
index af06ab3..e61514f 100644
--- a/Source/include/mpu_wrappers.h
+++ b/Source/include/mpu_wrappers.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -47,114 +47,198 @@
*/
/* Map standard task.h API functions to the MPU equivalents. */
- #define xTaskCreate MPU_xTaskCreate
- #define xTaskCreateStatic MPU_xTaskCreateStatic
- #define vTaskDelete MPU_vTaskDelete
- #define vTaskDelay MPU_vTaskDelay
- #define xTaskDelayUntil MPU_xTaskDelayUntil
- #define xTaskAbortDelay MPU_xTaskAbortDelay
- #define uxTaskPriorityGet MPU_uxTaskPriorityGet
- #define eTaskGetState MPU_eTaskGetState
- #define vTaskGetInfo MPU_vTaskGetInfo
- #define vTaskPrioritySet MPU_vTaskPrioritySet
- #define vTaskSuspend MPU_vTaskSuspend
- #define vTaskResume MPU_vTaskResume
- #define vTaskSuspendAll MPU_vTaskSuspendAll
- #define xTaskResumeAll MPU_xTaskResumeAll
- #define xTaskGetTickCount MPU_xTaskGetTickCount
- #define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks
- #define pcTaskGetName MPU_pcTaskGetName
- #define xTaskGetHandle MPU_xTaskGetHandle
- #define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark
- #define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2
- #define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag
- #define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag
- #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer
- #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer
- #define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook
- #define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle
- #define uxTaskGetSystemState MPU_uxTaskGetSystemState
- #define vTaskList MPU_vTaskList
- #define vTaskGetRunTimeStats MPU_vTaskGetRunTimeStats
- #define ulTaskGetIdleRunTimeCounter MPU_ulTaskGetIdleRunTimeCounter
- #define ulTaskGetIdleRunTimePercent MPU_ulTaskGetIdleRunTimePercent
- #define xTaskGenericNotify MPU_xTaskGenericNotify
- #define xTaskGenericNotifyWait MPU_xTaskGenericNotifyWait
- #define ulTaskGenericNotifyTake MPU_ulTaskGenericNotifyTake
- #define xTaskGenericNotifyStateClear MPU_xTaskGenericNotifyStateClear
- #define ulTaskGenericNotifyValueClear MPU_ulTaskGenericNotifyValueClear
- #define xTaskCatchUpTicks MPU_xTaskCatchUpTicks
+ #define vTaskDelay MPU_vTaskDelay
+ #define xTaskDelayUntil MPU_xTaskDelayUntil
+ #define xTaskAbortDelay MPU_xTaskAbortDelay
+ #define uxTaskPriorityGet MPU_uxTaskPriorityGet
+ #define eTaskGetState MPU_eTaskGetState
+ #define vTaskGetInfo MPU_vTaskGetInfo
+ #define vTaskSuspend MPU_vTaskSuspend
+ #define vTaskResume MPU_vTaskResume
+ #define xTaskGetTickCount MPU_xTaskGetTickCount
+ #define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks
+ #define pcTaskGetName MPU_pcTaskGetName
+ #define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark
+ #define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2
+ #define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag
+ #define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag
+ #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer
+ #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer
+ #define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle
+ #define uxTaskGetSystemState MPU_uxTaskGetSystemState
+ #define ulTaskGetIdleRunTimeCounter MPU_ulTaskGetIdleRunTimeCounter
+ #define ulTaskGetIdleRunTimePercent MPU_ulTaskGetIdleRunTimePercent
+ #define xTaskGenericNotify MPU_xTaskGenericNotify
+ #define xTaskGenericNotifyWait MPU_xTaskGenericNotifyWait
+ #define ulTaskGenericNotifyTake MPU_ulTaskGenericNotifyTake
+ #define xTaskGenericNotifyStateClear MPU_xTaskGenericNotifyStateClear
+ #define ulTaskGenericNotifyValueClear MPU_ulTaskGenericNotifyValueClear
+ #define vTaskSetTimeOutState MPU_vTaskSetTimeOutState
+ #define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut
+ #define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle
+ #define xTaskGetSchedulerState MPU_xTaskGetSchedulerState
- #define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle
- #define vTaskSetTimeOutState MPU_vTaskSetTimeOutState
- #define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut
- #define xTaskGetSchedulerState MPU_xTaskGetSchedulerState
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define ulTaskGetRunTimeCounter MPU_ulTaskGetRunTimeCounter
+ #define ulTaskGetRunTimePercent MPU_ulTaskGetRunTimePercent
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+/* Privileged only wrappers for Task APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 1 )
+ /* These are not needed in v2 because they do not take a task
+ * handle and therefore, no lookup is needed. Needed in v1 because
+ * these are available as system calls in v1. */
+ #define vTaskGetRunTimeStats MPU_vTaskGetRunTimeStats
+ #define vTaskList MPU_vTaskList
+ #define vTaskSuspendAll MPU_vTaskSuspendAll
+ #define xTaskCatchUpTicks MPU_xTaskCatchUpTicks
+ #define xTaskResumeAll MPU_xTaskResumeAll
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+ #define xTaskCreate MPU_xTaskCreate
+ #define xTaskCreateStatic MPU_xTaskCreateStatic
+ #define vTaskDelete MPU_vTaskDelete
+ #define vTaskPrioritySet MPU_vTaskPrioritySet
+ #define xTaskGetHandle MPU_xTaskGetHandle
+ #define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define xTaskCreateRestricted MPU_xTaskCreateRestricted
+ #define xTaskCreateRestrictedStatic MPU_xTaskCreateRestrictedStatic
+ #define vTaskAllocateMPURegions MPU_vTaskAllocateMPURegions
+ #define xTaskGetStaticBuffers MPU_xTaskGetStaticBuffers
+ #define uxTaskPriorityGetFromISR MPU_uxTaskPriorityGetFromISR
+ #define xTaskResumeFromISR MPU_xTaskResumeFromISR
+ #define xTaskGetApplicationTaskTagFromISR MPU_xTaskGetApplicationTaskTagFromISR
+ #define xTaskGenericNotifyFromISR MPU_xTaskGenericNotifyFromISR
+ #define vTaskGenericNotifyGiveFromISR MPU_vTaskGenericNotifyGiveFromISR
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/* Map standard queue.h API functions to the MPU equivalents. */
- #define xQueueGenericSend MPU_xQueueGenericSend
- #define xQueueReceive MPU_xQueueReceive
- #define xQueuePeek MPU_xQueuePeek
- #define xQueueSemaphoreTake MPU_xQueueSemaphoreTake
- #define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting
- #define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable
+ #define xQueueGenericSend MPU_xQueueGenericSend
+ #define xQueueReceive MPU_xQueueReceive
+ #define xQueuePeek MPU_xQueuePeek
+ #define xQueueSemaphoreTake MPU_xQueueSemaphoreTake
+ #define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting
+ #define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable
+ #define xQueueGetMutexHolder MPU_xQueueGetMutexHolder
+ #define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive
+ #define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive
+ #define xQueueAddToSet MPU_xQueueAddToSet
+ #define xQueueSelectFromSet MPU_xQueueSelectFromSet
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+ #define vQueueAddToRegistry MPU_vQueueAddToRegistry
+ #define vQueueUnregisterQueue MPU_vQueueUnregisterQueue
+ #define pcQueueGetName MPU_pcQueueGetName
+ #endif /* #if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+
+/* Privileged only wrappers for Queue APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
#define vQueueDelete MPU_vQueueDelete
#define xQueueCreateMutex MPU_xQueueCreateMutex
#define xQueueCreateMutexStatic MPU_xQueueCreateMutexStatic
#define xQueueCreateCountingSemaphore MPU_xQueueCreateCountingSemaphore
#define xQueueCreateCountingSemaphoreStatic MPU_xQueueCreateCountingSemaphoreStatic
- #define xQueueGetMutexHolder MPU_xQueueGetMutexHolder
- #define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive
- #define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive
#define xQueueGenericCreate MPU_xQueueGenericCreate
#define xQueueGenericCreateStatic MPU_xQueueGenericCreateStatic
- #define xQueueCreateSet MPU_xQueueCreateSet
- #define xQueueAddToSet MPU_xQueueAddToSet
- #define xQueueRemoveFromSet MPU_xQueueRemoveFromSet
- #define xQueueSelectFromSet MPU_xQueueSelectFromSet
#define xQueueGenericReset MPU_xQueueGenericReset
+ #define xQueueCreateSet MPU_xQueueCreateSet
+ #define xQueueRemoveFromSet MPU_xQueueRemoveFromSet
- #if ( configQUEUE_REGISTRY_SIZE > 0 )
- #define vQueueAddToRegistry MPU_vQueueAddToRegistry
- #define vQueueUnregisterQueue MPU_vQueueUnregisterQueue
- #define pcQueueGetName MPU_pcQueueGetName
- #endif
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define xQueueGenericGetStaticBuffers MPU_xQueueGenericGetStaticBuffers
+ #define xQueueGenericSendFromISR MPU_xQueueGenericSendFromISR
+ #define xQueueGiveFromISR MPU_xQueueGiveFromISR
+ #define xQueuePeekFromISR MPU_xQueuePeekFromISR
+ #define xQueueReceiveFromISR MPU_xQueueReceiveFromISR
+ #define xQueueIsQueueEmptyFromISR MPU_xQueueIsQueueEmptyFromISR
+ #define xQueueIsQueueFullFromISR MPU_xQueueIsQueueFullFromISR
+ #define uxQueueMessagesWaitingFromISR MPU_uxQueueMessagesWaitingFromISR
+ #define xQueueGetMutexHolderFromISR MPU_xQueueGetMutexHolderFromISR
+ #define xQueueSelectFromSetFromISR MPU_xQueueSelectFromSetFromISR
+ #endif /* if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/* Map standard timer.h API functions to the MPU equivalents. */
- #define pvTimerGetTimerID MPU_pvTimerGetTimerID
- #define vTimerSetTimerID MPU_vTimerSetTimerID
- #define xTimerIsTimerActive MPU_xTimerIsTimerActive
- #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle
- #define pcTimerGetName MPU_pcTimerGetName
- #define vTimerSetReloadMode MPU_vTimerSetReloadMode
- #define uxTimerGetReloadMode MPU_uxTimerGetReloadMode
- #define xTimerGetPeriod MPU_xTimerGetPeriod
- #define xTimerGetExpiryTime MPU_xTimerGetExpiryTime
- #define xTimerGenericCommand MPU_xTimerGenericCommand
+ #define pvTimerGetTimerID MPU_pvTimerGetTimerID
+ #define vTimerSetTimerID MPU_vTimerSetTimerID
+ #define xTimerIsTimerActive MPU_xTimerIsTimerActive
+ #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle
+ #define xTimerGenericCommand MPU_xTimerGenericCommand
+ #define pcTimerGetName MPU_pcTimerGetName
+ #define vTimerSetReloadMode MPU_vTimerSetReloadMode
+ #define uxTimerGetReloadMode MPU_uxTimerGetReloadMode
+ #define xTimerGetPeriod MPU_xTimerGetPeriod
+ #define xTimerGetExpiryTime MPU_xTimerGetExpiryTime
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define xTimerGetReloadMode MPU_xTimerGetReloadMode
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+/* Privileged only wrappers for Timer APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define xTimerCreate MPU_xTimerCreate
+ #define xTimerCreateStatic MPU_xTimerCreateStatic
+ #define xTimerGetStaticBuffer MPU_xTimerGetStaticBuffer
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/* Map standard event_group.h API functions to the MPU equivalents. */
- #define xEventGroupCreate MPU_xEventGroupCreate
- #define xEventGroupCreateStatic MPU_xEventGroupCreateStatic
- #define xEventGroupWaitBits MPU_xEventGroupWaitBits
- #define xEventGroupClearBits MPU_xEventGroupClearBits
- #define xEventGroupSetBits MPU_xEventGroupSetBits
- #define xEventGroupSync MPU_xEventGroupSync
- #define vEventGroupDelete MPU_vEventGroupDelete
+ #define xEventGroupWaitBits MPU_xEventGroupWaitBits
+ #define xEventGroupClearBits MPU_xEventGroupClearBits
+ #define xEventGroupSetBits MPU_xEventGroupSetBits
+ #define xEventGroupSync MPU_xEventGroupSync
+
+ #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ #define uxEventGroupGetNumber MPU_uxEventGroupGetNumber
+ #define vEventGroupSetNumber MPU_vEventGroupSetNumber
+ #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+
+/* Privileged only wrappers for Event Group APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+ #define xEventGroupCreate MPU_xEventGroupCreate
+ #define xEventGroupCreateStatic MPU_xEventGroupCreateStatic
+ #define vEventGroupDelete MPU_vEventGroupDelete
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define xEventGroupGetStaticBuffer MPU_xEventGroupGetStaticBuffer
+ #define xEventGroupClearBitsFromISR MPU_xEventGroupClearBitsFromISR
+ #define xEventGroupSetBitsFromISR MPU_xEventGroupSetBitsFromISR
+ #define xEventGroupGetBitsFromISR MPU_xEventGroupGetBitsFromISR
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/* Map standard message/stream_buffer.h API functions to the MPU
* equivalents. */
#define xStreamBufferSend MPU_xStreamBufferSend
#define xStreamBufferReceive MPU_xStreamBufferReceive
- #define xStreamBufferNextMessageLengthBytes MPU_xStreamBufferNextMessageLengthBytes
- #define vStreamBufferDelete MPU_vStreamBufferDelete
#define xStreamBufferIsFull MPU_xStreamBufferIsFull
#define xStreamBufferIsEmpty MPU_xStreamBufferIsEmpty
- #define xStreamBufferReset MPU_xStreamBufferReset
#define xStreamBufferSpacesAvailable MPU_xStreamBufferSpacesAvailable
#define xStreamBufferBytesAvailable MPU_xStreamBufferBytesAvailable
#define xStreamBufferSetTriggerLevel MPU_xStreamBufferSetTriggerLevel
- #define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate
- #define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic
+ #define xStreamBufferNextMessageLengthBytes MPU_xStreamBufferNextMessageLengthBytes
+/* Privileged only wrappers for Stream Buffer APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+
+ #define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate
+ #define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic
+ #define vStreamBufferDelete MPU_vStreamBufferDelete
+ #define xStreamBufferReset MPU_xStreamBufferReset
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define xStreamBufferGetStaticBuffers MPU_xStreamBufferGetStaticBuffers
+ #define xStreamBufferSendFromISR MPU_xStreamBufferSendFromISR
+ #define xStreamBufferReceiveFromISR MPU_xStreamBufferReceiveFromISR
+ #define xStreamBufferSendCompletedFromISR MPU_xStreamBufferSendCompletedFromISR
+ #define xStreamBufferReceiveCompletedFromISR MPU_xStreamBufferReceiveCompletedFromISR
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/* Remove the privileged function macro, but keep the PRIVILEGED_DATA
* macro so applications can place data in privileged access sections
@@ -163,6 +247,35 @@
#define PRIVILEGED_DATA __attribute__( ( section( "privileged_data" ) ) )
#define FREERTOS_SYSTEM_CALL
+
+ #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ #define vGrantAccessToTask( xTask, xTaskToGrantAccess ) vGrantAccessToKernelObject( ( xTask ), ( int32_t ) ( xTaskToGrantAccess ) )
+ #define vRevokeAccessToTask( xTask, xTaskToRevokeAccess ) vRevokeAccessToKernelObject( ( xTask ), ( int32_t ) ( xTaskToRevokeAccess ) )
+
+ #define vGrantAccessToSemaphore( xTask, xSemaphoreToGrantAccess ) vGrantAccessToKernelObject( ( xTask ), ( int32_t ) ( xSemaphoreToGrantAccess ) )
+ #define vRevokeAccessToSemaphore( xTask, xSemaphoreToRevokeAccess ) vRevokeAccessToKernelObject( ( xTask ), ( int32_t ) ( xSemaphoreToRevokeAccess ) )
+
+ #define vGrantAccessToQueue( xTask, xQueueToGrantAccess ) vGrantAccessToKernelObject( ( xTask ), ( int32_t ) ( xQueueToGrantAccess ) )
+ #define vRevokeAccessToQueue( xTask, xQueueToRevokeAccess ) vRevokeAccessToKernelObject( ( xTask ), ( int32_t ) ( xQueueToRevokeAccess ) )
+
+ #define vGrantAccessToQueueSet( xTask, xQueueSetToGrantAccess ) vGrantAccessToKernelObject( ( xTask ), ( int32_t ) ( xQueueSetToGrantAccess ) )
+ #define vRevokeAccessToQueueSet( xTask, xQueueSetToRevokeAccess ) vRevokeAccessToKernelObject( ( xTask ), ( int32_t ) ( xQueueSetToRevokeAccess ) )
+
+ #define vGrantAccessToEventGroup( xTask, xEventGroupToGrantAccess ) vGrantAccessToKernelObject( ( xTask ), ( int32_t ) ( xEventGroupToGrantAccess ) )
+ #define vRevokeAccessToEventGroup( xTask, xEventGroupToRevokeAccess ) vRevokeAccessToKernelObject( ( xTask ), ( int32_t ) ( xEventGroupToRevokeAccess ) )
+
+ #define vGrantAccessToStreamBuffer( xTask, xStreamBufferToGrantAccess ) vGrantAccessToKernelObject( ( xTask ), ( int32_t ) ( xStreamBufferToGrantAccess ) )
+ #define vRevokeAccessToStreamBuffer( xTask, xStreamBufferToRevokeAccess ) vRevokeAccessToKernelObject( ( xTask ), ( int32_t ) ( xStreamBufferToRevokeAccess ) )
+
+ #define vGrantAccessToMessageBuffer( xTask, xMessageBufferToGrantAccess ) vGrantAccessToKernelObject( ( xTask ), ( int32_t ) ( xMessageBufferToGrantAccess ) )
+ #define vRevokeAccessToMessageBuffer( xTask, xMessageBufferToRevokeAccess ) vRevokeAccessToKernelObject( ( xTask ), ( int32_t ) ( xMessageBufferToRevokeAccess ) )
+
+ #define vGrantAccessToTimer( xTask, xTimerToGrantAccess ) vGrantAccessToKernelObject( ( xTask ), ( int32_t ) ( xTimerToGrantAccess ) )
+ #define vRevokeAccessToTimer( xTask, xTimerToRevokeAccess ) vRevokeAccessToKernelObject( ( xTask ), ( int32_t ) ( xTimerToRevokeAccess ) )
+
+ #endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+
#else /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */
/* Ensure API functions go in the privileged execution section. */
diff --git a/Source/include/newlib-freertos.h b/Source/include/newlib-freertos.h
new file mode 100644
index 0000000..b6911c9
--- /dev/null
+++ b/Source/include/newlib-freertos.h
@@ -0,0 +1,62 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef INC_NEWLIB_FREERTOS_H
+#define INC_NEWLIB_FREERTOS_H
+
+/* Note Newlib support has been included by popular demand, but is not
+ * used by the FreeRTOS maintainers themselves. FreeRTOS is not
+ * responsible for resulting newlib operation. User must be familiar with
+ * newlib and must provide system-wide implementations of the necessary
+ * stubs. Be warned that (at the time of writing) the current newlib design
+ * implements a system-wide malloc() that must be provided with locks.
+ *
+ * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
+ * for additional information. */
+
+#include <reent.h>
+
+#define configUSE_C_RUNTIME_TLS_SUPPORT 1
+
+#ifndef configTLS_BLOCK_TYPE
+ #define configTLS_BLOCK_TYPE struct _reent
+#endif
+
+#ifndef configINIT_TLS_BLOCK
+ #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) _REENT_INIT_PTR( &( xTLSBlock ) )
+#endif
+
+#ifndef configSET_TLS_BLOCK
+ #define configSET_TLS_BLOCK( xTLSBlock ) ( _impure_ptr = &( xTLSBlock ) )
+#endif
+
+#ifndef configDEINIT_TLS_BLOCK
+ #define configDEINIT_TLS_BLOCK( xTLSBlock ) _reclaim_reent( &( xTLSBlock ) )
+#endif
+
+#endif /* INC_NEWLIB_FREERTOS_H */
diff --git a/Source/include/picolibc-freertos.h b/Source/include/picolibc-freertos.h
new file mode 100644
index 0000000..63f6927
--- /dev/null
+++ b/Source/include/picolibc-freertos.h
@@ -0,0 +1,90 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef INC_PICOLIBC_FREERTOS_H
+#define INC_PICOLIBC_FREERTOS_H
+
+/* Use picolibc TLS support to allocate space for __thread variables,
+ * initialize them at thread creation and set the TLS context at
+ * thread switch time.
+ *
+ * See the picolibc TLS docs:
+ * https://github.com/picolibc/picolibc/blob/main/doc/tls.md
+ * for additional information. */
+
+#include <picotls.h>
+
+#define configUSE_C_RUNTIME_TLS_SUPPORT 1
+
+#define configTLS_BLOCK_TYPE void *
+
+#define picolibcTLS_SIZE ( ( portPOINTER_SIZE_TYPE ) _tls_size() )
+#define picolibcSTACK_ALIGNMENT_MASK ( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK )
+
+#if __PICOLIBC_MAJOR__ > 1 || __PICOLIBC_MINOR__ >= 8
+
+/* Picolibc 1.8 and newer have explicit alignment values provided
+ * by the _tls_align() inline */
+ #define picolibcTLS_ALIGNMENT_MASK ( ( portPOINTER_SIZE_TYPE ) ( _tls_align() - 1 ) )
+#else
+
+/* For older Picolibc versions, use the general port alignment value */
+ #define picolibcTLS_ALIGNMENT_MASK ( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK )
+#endif
+
+/* Allocate thread local storage block off the end of the
+* stack. The _tls_size() function returns the size (in
+* bytes) of the total TLS area used by the application */
+#if ( portSTACK_GROWTH < 0 )
+
+ #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) \
+ do { \
+ pxTopOfStack = ( StackType_t * ) ( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) \
+ - picolibcTLS_SIZE ) & ~ \
+ configMAX( picolibcSTACK_ALIGNMENT_MASK, \
+ picolibcTLS_ALIGNMENT_MASK ) ); \
+ xTLSBlock = pxTopOfStack; \
+ _init_tls( xTLSBlock ); \
+ } while( 0 )
+#else /* portSTACK_GROWTH */
+ #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) \
+ do { \
+ xTLSBlock = ( void * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack + \
+ picolibcTLS_ALIGNMENT_MASK ) & ~picolibcTLS_ALIGNMENT_MASK ); \
+ pxTopOfStack = ( StackType_t * ) ( ( ( ( ( portPOINTER_SIZE_TYPE ) xTLSBlock ) + \
+ picolibcTLS_SIZE ) + picolibcSTACK_ALIGNMENT_MASK ) & \
+ ~picolibcSTACK_ALIGNMENT_MASK ); \
+ _init_tls( xTLSBlock ); \
+ } while( 0 )
+#endif /* portSTACK_GROWTH */
+
+#define configSET_TLS_BLOCK( xTLSBlock ) _set_tls( xTLSBlock )
+
+#define configDEINIT_TLS_BLOCK( xTLSBlock )
+
+#endif /* INC_PICOLIBC_FREERTOS_H */
diff --git a/Source/include/portable.h b/Source/include/portable.h
index e7ec7bd..ec11f0f 100644
--- a/Source/include/portable.h
+++ b/Source/include/portable.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -110,13 +110,15 @@
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION;
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) PRIVILEGED_FUNCTION;
#else
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION;
- #endif
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) PRIVILEGED_FUNCTION;
+ #endif /* if ( portHAS_STACK_OVERFLOW_CHECKING == 1 ) */
#else /* if ( portUSING_MPU_WRAPPERS == 1 ) */
#if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
@@ -229,6 +231,37 @@
uint32_t ulStackDepth ) PRIVILEGED_FUNCTION;
#endif
+/**
+ * @brief Checks if the calling task is authorized to access the given buffer.
+ *
+ * @param pvBuffer The buffer which the calling task wants to access.
+ * @param ulBufferLength The length of the pvBuffer.
+ * @param ulAccessRequested The permissions that the calling task wants.
+ *
+ * @return pdTRUE if the calling task is authorized to access the buffer,
+ * pdFALSE otherwise.
+ */
+#if ( portUSING_MPU_WRAPPERS == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) PRIVILEGED_FUNCTION;
+#endif
+
+/**
+ * @brief Checks if the calling task is authorized to access the given kernel object.
+ *
+ * @param lInternalIndexOfKernelObject The index of the kernel object in the kernel
+ * object handle pool.
+ *
+ * @return pdTRUE if the calling task is authorized to access the kernel object,
+ * pdFALSE otherwise.
+ */
+#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) PRIVILEGED_FUNCTION;
+
+#endif
+
/* *INDENT-OFF* */
#ifdef __cplusplus
}
diff --git a/Source/include/projdefs.h b/Source/include/projdefs.h
index aa49e59..4c46333 100644
--- a/Source/include/projdefs.h
+++ b/Source/include/projdefs.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -44,6 +44,10 @@
#define pdFALSE ( ( BaseType_t ) 0 )
#define pdTRUE ( ( BaseType_t ) 1 )
+#define pdFALSE_SIGNED ( ( BaseType_t ) 0 )
+#define pdTRUE_SIGNED ( ( BaseType_t ) 1 )
+#define pdFALSE_UNSIGNED ( ( UBaseType_t ) 0 )
+#define pdTRUE_UNSIGNED ( ( UBaseType_t ) 1 )
#define pdPASS ( pdTRUE )
#define pdFAIL ( pdFALSE )
@@ -60,10 +64,14 @@
#define configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES 0
#endif
-#if ( configUSE_16_BIT_TICKS == 1 )
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
#define pdINTEGRITY_CHECK_VALUE 0x5a5a
-#else
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
#define pdINTEGRITY_CHECK_VALUE 0x5a5a5a5aUL
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS )
+ #define pdINTEGRITY_CHECK_VALUE 0x5a5a5a5a5a5a5a5aULL
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/* The following errno values are used by FreeRTOS+ components, not FreeRTOS
@@ -96,6 +104,7 @@
#define pdFREERTOS_ERRNO_ENOTEMPTY 90 /* Directory not empty */
#define pdFREERTOS_ERRNO_ENAMETOOLONG 91 /* File or path name too long */
#define pdFREERTOS_ERRNO_EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define pdFREERTOS_ERRNO_EAFNOSUPPORT 97 /* Address family not supported by protocol */
#define pdFREERTOS_ERRNO_ENOBUFS 105 /* No buffer space available */
#define pdFREERTOS_ERRNO_ENOPROTOOPT 109 /* Protocol not available */
#define pdFREERTOS_ERRNO_EADDRINUSE 112 /* Address already in use */
diff --git a/Source/include/queue.h b/Source/include/queue.h
index df572e1..836adf5 100644
--- a/Source/include/queue.h
+++ b/Source/include/queue.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -238,6 +238,35 @@
/**
* queue. h
* @code{c}
+ * BaseType_t xQueueGetStaticBuffers( QueueHandle_t xQueue,
+ * uint8_t ** ppucQueueStorage,
+ * StaticQueue_t ** ppxStaticQueue );
+ * @endcode
+ *
+ * Retrieve pointers to a statically created queue's data structure buffer
+ * and storage area buffer. These are the same buffers that are supplied
+ * at the time of creation.
+ *
+ * @param xQueue The queue for which to retrieve the buffers.
+ *
+ * @param ppucQueueStorage Used to return a pointer to the queue's storage
+ * area buffer.
+ *
+ * @param ppxStaticQueue Used to return a pointer to the queue's data
+ * structure buffer.
+ *
+ * @return pdTRUE if buffers were retrieved, pdFALSE otherwise.
+ *
+ * \defgroup xQueueGetStaticBuffers xQueueGetStaticBuffers
+ * \ingroup QueueManagement
+ */
+#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+ #define xQueueGetStaticBuffers( xQueue, ppucQueueStorage, ppxStaticQueue ) xQueueGenericGetStaticBuffers( ( xQueue ), ( ppucQueueStorage ), ( ppxStaticQueue ) )
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * queue. h
+ * @code{c}
* BaseType_t xQueueSendToToFront(
* QueueHandle_t xQueue,
* const void *pvItemToQueue,
@@ -1346,9 +1375,9 @@
* @param pvBuffer Pointer to the buffer into which the received item will
* be copied.
*
- * @param pxTaskWoken A task may be blocked waiting for space to become
- * available on the queue. If xQueueReceiveFromISR causes such a task to
- * unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will
+ * @param pxHigherPriorityTaskWoken A task may be blocked waiting for space to
+ * become available on the queue. If xQueueReceiveFromISR causes such a task
+ * to unblock *pxHigherPriorityTaskWoken will get set to pdTRUE, otherwise it will
* remain unchanged.
*
* @return pdTRUE if an item was successfully received from the queue,
@@ -1565,6 +1594,18 @@
#endif
/*
+ * Generic version of the function used to retrieve the buffers of statically
+ * created queues. This is called by other functions and macros that retrieve
+ * the buffers of other statically created RTOS objects that use the queue
+ * structure as their base.
+ */
+#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+ BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
+ uint8_t ** ppucQueueStorage,
+ StaticQueue_t ** ppxStaticQueue ) PRIVILEGED_FUNCTION;
+#endif
+
+/*
* Queue sets provide a mechanism to allow a task to block (pend) on a read
* operation from multiple queues or semaphores simultaneously.
*
@@ -1711,7 +1752,8 @@
UBaseType_t uxQueueNumber ) PRIVILEGED_FUNCTION;
UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
-
+UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+UBaseType_t uxQueueGetQueueLength( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
/* *INDENT-OFF* */
#ifdef __cplusplus
diff --git a/Source/include/semphr.h b/Source/include/semphr.h
index d3165d8..46ac85a 100644
--- a/Source/include/semphr.h
+++ b/Source/include/semphr.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -95,13 +95,13 @@
*/
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
#define vSemaphoreCreateBinary( xSemaphore ) \
- { \
+ do { \
( xSemaphore ) = xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ); \
if( ( xSemaphore ) != NULL ) \
{ \
( void ) xSemaphoreGive( ( xSemaphore ) ); \
} \
- }
+ } while( 0 )
#endif
/**
@@ -1190,4 +1190,25 @@
*/
#define uxSemaphoreGetCountFromISR( xSemaphore ) uxQueueMessagesWaitingFromISR( ( QueueHandle_t ) ( xSemaphore ) )
+/**
+ * semphr.h
+ * @code{c}
+ * BaseType_t xSemaphoreGetStaticBuffer( SemaphoreHandle_t xSemaphore, StaticSemaphore_t ** ppxSemaphoreBuffer );
+ * @endcode
+ *
+ * Retrieve pointer to a statically created binary semaphore, counting semaphore,
+ * or mutex semaphore's data structure buffer. This is the same buffer that is
+ * supplied at the time of creation.
+ *
+ * @param xSemaphore The semaphore for which to retrieve the buffer.
+ *
+ * @param ppxSemaphoreBuffer Used to return a pointer to the semaphore's
+ * data structure buffer.
+ *
+ * @return pdTRUE if buffer was retrieved, pdFALSE otherwise.
+ */
+#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+ #define xSemaphoreGetStaticBuffer( xSemaphore, ppxSemaphoreBuffer ) xQueueGenericGetStaticBuffers( ( QueueHandle_t ) ( xSemaphore ), NULL, ( ppxSemaphoreBuffer ) )
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
#endif /* SEMAPHORE_H */
diff --git a/Source/include/stack_macros.h b/Source/include/stack_macros.h
index 2455674..354400d 100644
--- a/Source/include/stack_macros.h
+++ b/Source/include/stack_macros.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -57,13 +57,13 @@
/* Only the current stack state is to be checked. */
#define taskCHECK_FOR_STACK_OVERFLOW() \
- { \
+ do { \
/* Is the currently saved stack pointer within the stack limit? */ \
if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack + portSTACK_LIMIT_PADDING ) \
{ \
vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
} \
- }
+ } while( 0 )
#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */
/*-----------------------------------------------------------*/
@@ -72,14 +72,14 @@
/* Only the current stack state is to be checked. */
#define taskCHECK_FOR_STACK_OVERFLOW() \
- { \
+ do { \
\
/* Is the currently saved stack pointer within the stack limit? */ \
if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack - portSTACK_LIMIT_PADDING ) \
{ \
vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
} \
- }
+ } while( 0 )
#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */
/*-----------------------------------------------------------*/
@@ -87,7 +87,7 @@
#if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) )
#define taskCHECK_FOR_STACK_OVERFLOW() \
- { \
+ do { \
const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \
const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \
\
@@ -98,7 +98,7 @@
{ \
vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
} \
- }
+ } while( 0 )
#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */
/*-----------------------------------------------------------*/
@@ -106,7 +106,7 @@
#if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) )
#define taskCHECK_FOR_STACK_OVERFLOW() \
- { \
+ do { \
int8_t * pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \
static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
@@ -122,7 +122,7 @@
{ \
vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
} \
- }
+ } while( 0 )
#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */
/*-----------------------------------------------------------*/
diff --git a/Source/include/stdint.readme b/Source/include/stdint.readme
index ef2b7f7..11664f3 100644
--- a/Source/include/stdint.readme
+++ b/Source/include/stdint.readme
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/include/stream_buffer.h b/Source/include/stream_buffer.h
index bf5019e..48ca266 100644
--- a/Source/include/stream_buffer.h
+++ b/Source/include/stream_buffer.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -264,6 +264,38 @@
* stream_buffer.h
*
* @code{c}
+ * BaseType_t xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffer,
+ * uint8_t ** ppucStreamBufferStorageArea,
+ * StaticStreamBuffer_t ** ppxStaticStreamBuffer );
+ * @endcode
+ *
+ * Retrieve pointers to a statically created stream buffer's data structure
+ * buffer and storage area buffer. These are the same buffers that are supplied
+ * at the time of creation.
+ *
+ * @param xStreamBuffer The stream buffer for which to retrieve the buffers.
+ *
+ * @param ppucStreamBufferStorageArea Used to return a pointer to the stream
+ * buffer's storage area buffer.
+ *
+ * @param ppxStaticStreamBuffer Used to return a pointer to the stream
+ * buffer's data structure buffer.
+ *
+ * @return pdTRUE if buffers were retrieved, pdFALSE otherwise.
+ *
+ * \defgroup xStreamBufferGetStaticBuffers xStreamBufferGetStaticBuffers
+ * \ingroup StreamBufferManagement
+ */
+#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+ BaseType_t xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffer,
+ uint8_t ** ppucStreamBufferStorageArea,
+ StaticStreamBuffer_t ** ppxStaticStreamBuffer ) PRIVILEGED_FUNCTION;
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * stream_buffer.h
+ *
+ * @code{c}
* size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
* const void *pvTxData,
* size_t xDataLengthBytes,
diff --git a/Source/include/task.h b/Source/include/task.h
index ab8eeb8..1207e18 100644
--- a/Source/include/task.h
+++ b/Source/include/task.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -53,10 +53,10 @@
* The tskKERNEL_VERSION_MAJOR, tskKERNEL_VERSION_MINOR, tskKERNEL_VERSION_BUILD
* values will reflect the last released version number.
*/
-#define tskKERNEL_VERSION_NUMBER "V10.5.1"
+#define tskKERNEL_VERSION_NUMBER "V10.6.2"
#define tskKERNEL_VERSION_MAJOR 10
-#define tskKERNEL_VERSION_MINOR 5
-#define tskKERNEL_VERSION_BUILD 1
+#define tskKERNEL_VERSION_MINOR 6
+#define tskKERNEL_VERSION_BUILD 2
/* MPU region parameters passed in ulParameters
* of MemoryRegion_t struct. */
@@ -66,6 +66,11 @@
#define tskMPU_REGION_NORMAL_MEMORY ( 1UL << 3UL )
#define tskMPU_REGION_DEVICE_MEMORY ( 1UL << 4UL )
+/* MPU region permissions stored in MPU settings to
+ * authorize access requests. */
+#define tskMPU_READ_PERMISSION ( 1UL << 0UL )
+#define tskMPU_WRITE_PERMISSION ( 1UL << 1UL )
+
/* The direct to task notification feature used to have only a single notification
* per task. Now there is an array of notifications per task that is dimensioned by
* configTASK_NOTIFICATION_ARRAY_ENTRIES. For backward compatibility, any use of the
@@ -658,7 +663,7 @@
*
* @param xTask The handle of the task being updated.
*
- * @param xRegions A pointer to a MemoryRegion_t structure that contains the
+ * @param[in] pxRegions A pointer to a MemoryRegion_t structure that contains the
* new memory region definitions.
*
* Example usage:
@@ -1510,6 +1515,36 @@
TaskHandle_t xTaskGetHandle( const char * pcNameToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
/**
+ * task. h
+ * @code{c}
+ * BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask,
+ * StackType_t ** ppuxStackBuffer,
+ * StaticTask_t ** ppxTaskBuffer );
+ * @endcode
+ *
+ * Retrieve pointers to a statically created task's data structure
+ * buffer and stack buffer. These are the same buffers that are supplied
+ * at the time of creation.
+ *
+ * @param xTask The task for which to retrieve the buffers.
+ *
+ * @param ppuxStackBuffer Used to return a pointer to the task's stack buffer.
+ *
+ * @param ppxTaskBuffer Used to return a pointer to the task's data structure
+ * buffer.
+ *
+ * @return pdTRUE if buffers were retrieved, pdFALSE otherwise.
+ *
+ * \defgroup xTaskGetStaticBuffers xTaskGetStaticBuffers
+ * \ingroup TaskUtils
+ */
+#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+ BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask,
+ StackType_t ** ppuxStackBuffer,
+ StaticTask_t ** ppxTaskBuffer ) PRIVILEGED_FUNCTION;
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
* task.h
* @code{c}
* UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask );
@@ -1634,7 +1669,7 @@
/**
* task.h
* @code{c}
- * void vApplicationStackOverflowHook( TaskHandle_t xTask char *pcTaskName);
+ * void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName);
* @endcode
*
* The application stack overflow hook is called when a stack overflow is detected for a task.
@@ -1649,7 +1684,25 @@
#endif
-#if ( configUSE_TICK_HOOK > 0 )
+#if ( configUSE_IDLE_HOOK == 1 )
+
+/**
+ * task.h
+ * @code{c}
+ * void vApplicationIdleHook( void );
+ * @endcode
+ *
+ * The application idle hook is called by the idle task.
+ * This allows the application designer to add background functionality without
+ * the overhead of a separate task.
+ * NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, CALL A FUNCTION THAT MIGHT BLOCK.
+ */
+ void vApplicationIdleHook( void );
+
+#endif
+
+
+#if ( configUSE_TICK_HOOK != 0 )
/**
* task.h
@@ -1919,17 +1972,52 @@
/**
* task. h
* @code{c}
+ * configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask );
+ * configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask );
+ * @endcode
+ *
+ * configGENERATE_RUN_TIME_STATS must be defined as 1 for these functions to be
+ * available. The application must also then provide definitions for
+ * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and
+ * portGET_RUN_TIME_COUNTER_VALUE() to configure a peripheral timer/counter and
+ * return the timers current count value respectively. The counter should be
+ * at least 10 times the frequency of the tick count.
+ *
+ * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total
+ * accumulated execution time being stored for each task. The resolution
+ * of the accumulated time value depends on the frequency of the timer
+ * configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro.
+ * While uxTaskGetSystemState() and vTaskGetRunTimeStats() writes the total
+ * execution time of each task into a buffer, ulTaskGetRunTimeCounter()
+ * returns the total execution time of just one task and
+ * ulTaskGetRunTimePercent() returns the percentage of the CPU time used by
+ * just one task.
+ *
+ * @return The total run time of the given task or the percentage of the total
+ * run time consumed by the given task. This is the amount of time the task
+ * has actually been executing. The unit of time is dependent on the frequency
+ * configured using the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and
+ * portGET_RUN_TIME_COUNTER_VALUE() macros.
+ *
+ * \defgroup ulTaskGetRunTimeCounter ulTaskGetRunTimeCounter
+ * \ingroup TaskUtils
+ */
+configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * @code{c}
* configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void );
* configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void );
* @endcode
*
- * configGENERATE_RUN_TIME_STATS, configUSE_STATS_FORMATTING_FUNCTIONS and
- * INCLUDE_xTaskGetIdleTaskHandle must all be defined as 1 for these functions
- * to be available. The application must also then provide definitions for
- * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE()
- * to configure a peripheral timer/counter and return the timers current count
- * value respectively. The counter should be at least 10 times the frequency of
- * the tick count.
+ * configGENERATE_RUN_TIME_STATS must be defined as 1 for these functions to be
+ * available. The application must also then provide definitions for
+ * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and
+ * portGET_RUN_TIME_COUNTER_VALUE() to configure a peripheral timer/counter and
+ * return the timers current count value respectively. The counter should be
+ * at least 10 times the frequency of the tick count.
*
* Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total
* accumulated execution time being stored for each task. The resolution
@@ -3109,6 +3197,35 @@
*/
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION;
+#if ( portUSING_MPU_WRAPPERS == 1 )
+
+/*
+ * For internal use only. Get MPU settings associated with a task.
+ */
+ xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+#endif /* portUSING_MPU_WRAPPERS */
+
+
+#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/*
+ * For internal use only. Grant/Revoke a task's access to a kernel object.
+ */
+ void vGrantAccessToKernelObject( TaskHandle_t xExternalTaskHandle,
+ int32_t lExternalKernelObjectHandle ) PRIVILEGED_FUNCTION;
+ void vRevokeAccessToKernelObject( TaskHandle_t xExternalTaskHandle,
+ int32_t lExternalKernelObjectHandle ) PRIVILEGED_FUNCTION;
+
+/*
+ * For internal use only. Grant/Revoke a task's access to a kernel object.
+ */
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) PRIVILEGED_FUNCTION;
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
/* *INDENT-OFF* */
#ifdef __cplusplus
diff --git a/Source/include/timers.h b/Source/include/timers.h
index 4b73908..0900edb 100644
--- a/Source/include/timers.h
+++ b/Source/include/timers.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -1323,6 +1323,26 @@
*/
TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+/**
+ * BaseType_t xTimerGetStaticBuffer( TimerHandle_t xTimer,
+ * StaticTimer_t ** ppxTimerBuffer );
+ *
+ * Retrieve pointer to a statically created timer's data structure
+ * buffer. This is the same buffer that is supplied at the time of
+ * creation.
+ *
+ * @param xTimer The timer for which to retrieve the buffer.
+ *
+ * @param ppxTimerBuffer Used to return a pointer to the timer's data
+ * structure buffer.
+ *
+ * @return pdTRUE if the buffer was retrieved, pdFALSE otherwise.
+ */
+#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+ BaseType_t xTimerGetStaticBuffer( TimerHandle_t xTimer,
+ StaticTimer_t ** ppxTimerBuffer ) PRIVILEGED_FUNCTION;
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
/*
* Functions beyond this part are not part of the public API and are intended
* for use by the kernel only.
@@ -1361,6 +1381,20 @@
#endif
+#if ( configUSE_DAEMON_TASK_STARTUP_HOOK != 0 )
+
+/**
+ * timers.h
+ * @code{c}
+ * void vApplicationDaemonTaskStartupHook( void );
+ * @endcode
+ *
+ * This hook function is called from the timer task once when the task starts running.
+ */
+ void vApplicationDaemonTaskStartupHook( void );
+
+#endif
+
/* *INDENT-OFF* */
#ifdef __cplusplus
}
diff --git a/Source/list.c b/Source/list.c
index 0f4f42e..6129aff 100644
--- a/Source/list.c
+++ b/Source/list.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/Common/mpu_wrappers.c b/Source/portable/Common/mpu_wrappers.c
index 9a45ba8..17efcdb 100644
--- a/Source/portable/Common/mpu_wrappers.c
+++ b/Source/portable/Common/mpu_wrappers.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -48,7 +48,11 @@
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
/*-----------------------------------------------------------*/
-#if ( portUSING_MPU_WRAPPERS == 1 )
+#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ #error Access control list is not available with this MPU wrapper. Please set configENABLE_ACCESS_CONTROL_LIST to 0.
+ #endif
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
BaseType_t MPU_xTaskCreate( TaskFunction_t pvTaskCode,
@@ -1817,7 +1821,7 @@
#if ( configUSE_TIMERS == 1 )
void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
- const UBaseType_t uxAutoReload ) /* FREERTOS_SYSTEM_CALL */
+ const BaseType_t uxAutoReload ) /* FREERTOS_SYSTEM_CALL */
{
if( portIS_PRIVILEGED() == pdFALSE )
{
@@ -2537,5 +2541,5 @@
#endif
/*-----------------------------------------------------------*/
-#endif /* portUSING_MPU_WRAPPERS == 1 */
+#endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) */
/*-----------------------------------------------------------*/
diff --git a/Source/portable/Common/mpu_wrappers_v2.c b/Source/portable/Common/mpu_wrappers_v2.c
new file mode 100644
index 0000000..f82762d
--- /dev/null
+++ b/Source/portable/Common/mpu_wrappers_v2.c
@@ -0,0 +1,5054 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/*
+ * Implementation of the wrapper functions used to raise the processor privilege
+ * before calling a standard FreeRTOS API function.
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+#include "mpu_syscall_numbers.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #ifndef configPROTECTED_KERNEL_OBJECT_POOL_SIZE
+ #error configPROTECTED_KERNEL_OBJECT_POOL_SIZE must be defined to maximum number of kernel objects in the application.
+ #endif
+
+/**
+ * @brief Offset added to the index before returning to the user.
+ *
+ * If the actual handle is stored at index i, ( i + INDEX_OFFSET )
+ * is returned to the user.
+ */
+ #define INDEX_OFFSET 1
+
+/**
+ * @brief Opaque type for a kernel object.
+ */
+ struct OpaqueObject;
+ typedef struct OpaqueObject * OpaqueObjectHandle_t;
+
+/**
+ * @brief Defines kernel object in the kernel object pool.
+ */
+ typedef struct KernelObject
+ {
+ OpaqueObjectHandle_t xInternalObjectHandle;
+ uint32_t ulKernelObjectType;
+ void * pvKernelObjectData;
+ } KernelObject_t;
+
+/**
+ * @brief Kernel object types.
+ */
+ #define KERNEL_OBJECT_TYPE_INVALID ( 0UL )
+ #define KERNEL_OBJECT_TYPE_QUEUE ( 1UL )
+ #define KERNEL_OBJECT_TYPE_TASK ( 2UL )
+ #define KERNEL_OBJECT_TYPE_STREAM_BUFFER ( 3UL )
+ #define KERNEL_OBJECT_TYPE_EVENT_GROUP ( 4UL )
+ #define KERNEL_OBJECT_TYPE_TIMER ( 5UL )
+
+/**
+ * @brief Checks whether an external index is valid or not.
+ */
+ #define IS_EXTERNAL_INDEX_VALID( lIndex ) \
+ ( ( ( lIndex ) >= INDEX_OFFSET ) && \
+ ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE + INDEX_OFFSET ) ) )
+
+/**
+ * @brief Checks whether an internal index is valid or not.
+ */
+ #define IS_INTERNAL_INDEX_VALID( lIndex ) \
+ ( ( ( lIndex ) >= 0 ) && \
+ ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE ) ) )
+
+/**
+ * @brief Converts an internal index into external.
+ */
+ #define CONVERT_TO_EXTERNAL_INDEX( lIndex ) ( ( lIndex ) + INDEX_OFFSET )
+
+/**
+ * @brief Converts an external index into internal.
+ */
+ #define CONVERT_TO_INTERNAL_INDEX( lIndex ) ( ( lIndex ) - INDEX_OFFSET )
+
+/**
+ * @brief Max value that fits in a uint32_t type.
+ */
+ #define mpuUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/**
+ * @brief Check if multiplying a and b will result in overflow.
+ */
+ #define mpuMULTIPLY_UINT32_WILL_OVERFLOW( a, b ) ( ( ( a ) > 0 ) && ( ( b ) > ( mpuUINT32_MAX / ( a ) ) ) )
+
+/**
+ * @brief Get the index of a free slot in the kernel object pool.
+ *
+ * If a free slot is found, this function marks the slot as
+ * "not free".
+ *
+ * @return Index of a free slot is returned, if a free slot is
+ * found. Otherwise -1 is returned.
+ */
+ static int32_t MPU_GetFreeIndexInKernelObjectPool( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Set the given index as free in the kernel object pool.
+ *
+ * @param lIndex The index to set as free.
+ */
+ static void MPU_SetIndexFreeInKernelObjectPool( int32_t lIndex ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Get the index at which a given kernel object is stored.
+ *
+ * @param xHandle The given kernel object handle.
+ * @param ulKernelObjectType The kernel object type.
+ *
+ * @return Index at which the kernel object is stored if it is a valid
+ * handle, -1 otherwise.
+ */
+ static int32_t MPU_GetIndexForHandle( OpaqueObjectHandle_t xHandle,
+ uint32_t ulKernelObjectType ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Store the given kernel object handle at the given index in
+ * the kernel object pool.
+ *
+ * @param lIndex Index to store the given handle at.
+ * @param xHandle Kernel object handle to store.
+ * @param pvKernelObjectData The data associated with the kernel object.
+ * Currently, only used for timer objects to store timer callback.
+ * @param ulKernelObjectType The kernel object type.
+ */
+ static void MPU_StoreHandleAndDataAtIndex( int32_t lIndex,
+ OpaqueObjectHandle_t xHandle,
+ void * pvKernelObjectData,
+ uint32_t ulKernelObjectType ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Get the kernel object handle at the given index from
+ * the kernel object pool.
+ *
+ * @param lIndex Index at which to get the kernel object handle.
+ * @param ulKernelObjectType The kernel object type.
+ *
+ * @return The kernel object handle at the index.
+ */
+ static OpaqueObjectHandle_t MPU_GetHandleAtIndex( int32_t lIndex,
+ uint32_t ulKernelObjectType ) PRIVILEGED_FUNCTION;
+
+ #if ( configUSE_TIMERS == 1 )
+
+/**
+ * @brief The function registered as callback for all the timers.
+ *
+ * We intercept all the timer callbacks so that we can call application
+ * callbacks with opaque handle.
+ *
+ * @param xInternalHandle The internal timer handle.
+ */
+ static void MPU_TimerCallback( TimerHandle_t xInternalHandle ) PRIVILEGED_FUNCTION;
+
+ #endif /* #if ( configUSE_TIMERS == 1 ) */
+
+/*
+ * Wrappers to keep all the casting in one place.
+ */
+ #define MPU_StoreQueueHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE )
+ #define MPU_GetQueueHandleAtIndex( lIndex ) ( QueueHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE )
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+ #define MPU_StoreQueueSetHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE )
+ #define MPU_GetQueueSetHandleAtIndex( lIndex ) ( QueueSetHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE )
+ #define MPU_StoreQueueSetMemberHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE )
+ #define MPU_GetQueueSetMemberHandleAtIndex( lIndex ) ( QueueSetMemberHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE )
+ #define MPU_GetIndexForQueueSetMemberHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_QUEUE )
+ #endif
+
+/*
+ * Wrappers to keep all the casting in one place for Task APIs.
+ */
+ #define MPU_StoreTaskHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_TASK )
+ #define MPU_GetTaskHandleAtIndex( lIndex ) ( TaskHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_TASK )
+ #define MPU_GetIndexForTaskHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_TASK )
+
+/*
+ * Wrappers to keep all the casting in one place for Event Group APIs.
+ */
+ #define MPU_StoreEventGroupHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_EVENT_GROUP )
+ #define MPU_GetEventGroupHandleAtIndex( lIndex ) ( EventGroupHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_EVENT_GROUP )
+ #define MPU_GetIndexForEventGroupHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_EVENT_GROUP )
+
+/*
+ * Wrappers to keep all the casting in one place for Stream Buffer APIs.
+ */
+ #define MPU_StoreStreamBufferHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
+ #define MPU_GetStreamBufferHandleAtIndex( lIndex ) ( StreamBufferHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
+ #define MPU_GetIndexForStreamBufferHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
+
+ #if ( configUSE_TIMERS == 1 )
+
+/*
+ * Wrappers to keep all the casting in one place for Timer APIs.
+ */
+ #define MPU_StoreTimerHandleAtIndex( lIndex, xHandle, pxApplicationCallback ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, ( void * ) pxApplicationCallback, KERNEL_OBJECT_TYPE_TIMER )
+ #define MPU_GetTimerHandleAtIndex( lIndex ) ( TimerHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_TIMER )
+ #define MPU_GetIndexForTimerHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_TIMER )
+
+ #endif /* #if ( configUSE_TIMERS == 1 ) */
+
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Kernel object pool.
+ */
+ PRIVILEGED_DATA static KernelObject_t xKernelObjectPool[ configPROTECTED_KERNEL_OBJECT_POOL_SIZE ] = { NULL };
+/*-----------------------------------------------------------*/
+
+ static int32_t MPU_GetFreeIndexInKernelObjectPool( void ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t i, lFreeIndex = -1;
+
+ /* This function is called only from resource create APIs
+ * which are not supposed to be called from ISRs. Therefore,
+ * we only need to suspend the scheduler and do not require
+ * critical section. */
+ vTaskSuspendAll();
+ {
+ /* Linear scan for the first slot whose handle is NULL (i.e. free). */
+ for( i = 0; i < configPROTECTED_KERNEL_OBJECT_POOL_SIZE; i++ )
+ {
+ if( xKernelObjectPool[ i ].xInternalObjectHandle == NULL )
+ {
+ /* Mark this index as not free. */
+ /* The all-ones sentinel reserves the slot until the caller
+ * stores the real handle via MPU_StoreHandleAndDataAtIndex(). */
+ xKernelObjectPool[ i ].xInternalObjectHandle = ( OpaqueObjectHandle_t ) ( ~0 );
+ lFreeIndex = i;
+ break;
+ }
+ }
+ }
+ xTaskResumeAll();
+
+ /* -1 indicates that the pool is exhausted. */
+ return lFreeIndex;
+ }
+/*-----------------------------------------------------------*/
+
+ static void MPU_SetIndexFreeInKernelObjectPool( int32_t lIndex ) /* PRIVILEGED_FUNCTION */
+ {
+ configASSERT( IS_INTERNAL_INDEX_VALID( lIndex ) != pdFALSE );
+
+ /* Reset all three fields atomically so a concurrent lookup never
+ * observes a half-cleared slot. */
+ taskENTER_CRITICAL();
+ {
+ xKernelObjectPool[ lIndex ].xInternalObjectHandle = NULL;
+ xKernelObjectPool[ lIndex ].ulKernelObjectType = KERNEL_OBJECT_TYPE_INVALID;
+ xKernelObjectPool[ lIndex ].pvKernelObjectData = NULL;
+ }
+ taskEXIT_CRITICAL();
+ }
+/*-----------------------------------------------------------*/
+
+ static int32_t MPU_GetIndexForHandle( OpaqueObjectHandle_t xHandle,
+ uint32_t ulKernelObjectType ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t i, lIndex = -1;
+
+ configASSERT( xHandle != NULL );
+
+ /* Both the handle and the object type must match - a handle of the
+ * wrong type is treated the same as an unknown handle. */
+ for( i = 0; i < configPROTECTED_KERNEL_OBJECT_POOL_SIZE; i++ )
+ {
+ if( ( xKernelObjectPool[ i ].xInternalObjectHandle == xHandle ) &&
+ ( xKernelObjectPool[ i ].ulKernelObjectType == ulKernelObjectType ) )
+ {
+ lIndex = i;
+ break;
+ }
+ }
+
+ /* Returns -1 when the handle is not found in the pool. */
+ return lIndex;
+ }
+/*-----------------------------------------------------------*/
+
+ static void MPU_StoreHandleAndDataAtIndex( int32_t lIndex,
+ OpaqueObjectHandle_t xHandle,
+ void * pvKernelObjectData,
+ uint32_t ulKernelObjectType ) /* PRIVILEGED_FUNCTION */
+ {
+ configASSERT( IS_INTERNAL_INDEX_VALID( lIndex ) != pdFALSE );
+ /* NOTE(review): lIndex is expected to be a slot previously reserved via
+ * MPU_GetFreeIndexInKernelObjectPool(); this overwrites the reservation
+ * sentinel with the real handle. */
+ xKernelObjectPool[ lIndex ].xInternalObjectHandle = xHandle;
+ xKernelObjectPool[ lIndex ].ulKernelObjectType = ulKernelObjectType;
+ xKernelObjectPool[ lIndex ].pvKernelObjectData = pvKernelObjectData;
+ }
+/*-----------------------------------------------------------*/
+
+ static OpaqueObjectHandle_t MPU_GetHandleAtIndex( int32_t lIndex,
+ uint32_t ulKernelObjectType ) /* PRIVILEGED_FUNCTION */
+ {
+ OpaqueObjectHandle_t xObjectHandle = NULL;
+
+ configASSERT( IS_INTERNAL_INDEX_VALID( lIndex ) != pdFALSE );
+
+ /* Return NULL rather than a handle of the wrong kind - this prevents
+ * an index for one object type being replayed against another API. */
+ if( xKernelObjectPool[ lIndex ].ulKernelObjectType == ulKernelObjectType )
+ {
+ xObjectHandle = xKernelObjectPool[ lIndex ].xInternalObjectHandle;
+ }
+
+ return xObjectHandle;
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ /* Grant a task access to a kernel object. Both parameters are external
+ * (index-based) handles as seen by unprivileged code. */
+ void vGrantAccessToKernelObject( TaskHandle_t xExternalTaskHandle,
+ int32_t lExternalKernelObjectHandle ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lExternalTaskIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( IS_EXTERNAL_INDEX_VALID( lExternalKernelObjectHandle ) != pdFALSE )
+ {
+ if( xExternalTaskHandle == NULL )
+ {
+ /* NULL task handle is forwarded to the port layer unchanged -
+ * presumably meaning the calling task; confirm against the port
+ * implementation of vPortGrantAccessToKernelObject(). */
+ vPortGrantAccessToKernelObject( xExternalTaskHandle, CONVERT_TO_INTERNAL_INDEX( lExternalKernelObjectHandle ) );
+ }
+ else
+ {
+ /* External task handles are pool indices smuggled through the
+ * TaskHandle_t type; recover the index before validating it. */
+ lExternalTaskIndex = ( int32_t ) xExternalTaskHandle;
+
+ if( IS_EXTERNAL_INDEX_VALID( lExternalTaskIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lExternalTaskIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vPortGrantAccessToKernelObject( xInternalTaskHandle,
+ CONVERT_TO_INTERNAL_INDEX( lExternalKernelObjectHandle ) );
+ }
+ }
+ }
+ }
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ /* Revoke a task's access to a kernel object. Mirror image of
+ * vGrantAccessToKernelObject() - same validation chain, opposite effect. */
+ void vRevokeAccessToKernelObject( TaskHandle_t xExternalTaskHandle,
+ int32_t lExternalKernelObjectHandle ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lExternalTaskIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( IS_EXTERNAL_INDEX_VALID( lExternalKernelObjectHandle ) != pdFALSE )
+ {
+ if( xExternalTaskHandle == NULL )
+ {
+ /* NULL task handle is forwarded to the port layer unchanged. */
+ vPortRevokeAccessToKernelObject( xExternalTaskHandle, CONVERT_TO_INTERNAL_INDEX( lExternalKernelObjectHandle ) );
+ }
+ else
+ {
+ /* Recover the pool index carried inside the external handle. */
+ lExternalTaskIndex = ( int32_t ) xExternalTaskHandle;
+
+ if( IS_EXTERNAL_INDEX_VALID( lExternalTaskIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lExternalTaskIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vPortRevokeAccessToKernelObject( xInternalTaskHandle,
+ CONVERT_TO_INTERNAL_INDEX( lExternalKernelObjectHandle ) );
+ }
+ }
+ }
+ }
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* Single trampoline registered as the callback for every timer created
+ * through the MPU wrappers. It translates the internal timer handle back
+ * into the external (index-based) handle before invoking the application
+ * callback stored in the pool slot's pvKernelObjectData. */
+ static void MPU_TimerCallback( TimerHandle_t xInternalHandle ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t i, lIndex = -1;
+ TimerHandle_t xExternalHandle = NULL;
+ TimerCallbackFunction_t pxApplicationCallBack = NULL;
+
+ /* Coming from the timer task and therefore, should be valid. */
+ configASSERT( xInternalHandle != NULL );
+
+ /* Reverse-map the internal handle to its pool index. */
+ for( i = 0; i < configPROTECTED_KERNEL_OBJECT_POOL_SIZE; i++ )
+ {
+ if( ( ( TimerHandle_t ) xKernelObjectPool[ i ].xInternalObjectHandle == xInternalHandle ) &&
+ ( xKernelObjectPool[ i ].ulKernelObjectType == KERNEL_OBJECT_TYPE_TIMER ) )
+ {
+ lIndex = i;
+ break;
+ }
+ }
+
+ configASSERT( lIndex != -1 );
+ /* The application only ever saw the external index as its "handle". */
+ xExternalHandle = ( TimerHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+
+ pxApplicationCallBack = ( TimerCallbackFunction_t ) xKernelObjectPool[ lIndex ].pvKernelObjectData;
+ pxApplicationCallBack( xExternalHandle );
+ }
+
+ #endif /* #if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/*-----------------------------------------------------------*/
+/* MPU wrappers for tasks APIs. */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+
+ BaseType_t MPU_xTaskDelayUntilImpl( TickType_t * const pxPreviousWakeTime,
+ TickType_t xTimeIncrement ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for xTaskDelayUntil(). Returns pdFAIL when the parameters
+ * are invalid or the calling task may not access pxPreviousWakeTime. */
+ BaseType_t MPU_xTaskDelayUntilImpl( TickType_t * const pxPreviousWakeTime,
+ TickType_t xTimeIncrement ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ BaseType_t xIsPreviousWakeTimeAccessible = pdFALSE;
+
+ if( ( pxPreviousWakeTime != NULL ) && ( xTimeIncrement > 0U ) )
+ {
+ /* The wake-time variable lives in application memory and is both
+ * read and updated by the kernel - verify both permissions. */
+ xIsPreviousWakeTimeAccessible = xPortIsAuthorizedToAccessBuffer( pxPreviousWakeTime,
+ sizeof( TickType_t ),
+ ( tskMPU_WRITE_PERMISSION | tskMPU_READ_PERMISSION ) );
+
+ if( xIsPreviousWakeTimeAccessible == pdTRUE )
+ {
+ xReturn = xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement );
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ BaseType_t MPU_xTaskAbortDelayImpl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for xTaskAbortDelay(). xTask is an opaque external index,
+ * not a real handle - validate it, check the caller's access rights, then
+ * translate to the internal handle before calling the kernel. */
+ BaseType_t MPU_xTaskAbortDelayImpl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = xTaskAbortDelay( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+
+ void MPU_vTaskDelayImpl( TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for vTaskDelay(). No kernel object or buffer is involved,
+ * so no access checks are needed - the call is passed straight through. */
+ void MPU_vTaskDelayImpl( TickType_t xTicksToDelay ) /* PRIVILEGED_FUNCTION */
+ {
+ vTaskDelay( xTicksToDelay );
+ }
+
+ #endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+ UBaseType_t MPU_uxTaskPriorityGetImpl( const TaskHandle_t pxTask ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for uxTaskPriorityGet(). A NULL handle queries the calling
+ * task directly; otherwise the opaque external index is validated and
+ * translated first. */
+ UBaseType_t MPU_uxTaskPriorityGetImpl( const TaskHandle_t pxTask ) /* PRIVILEGED_FUNCTION */
+ {
+ /* configMAX_PRIORITIES is outside the valid priority range
+ * ( 0 .. configMAX_PRIORITIES - 1 ) and so doubles as the failure value. */
+ UBaseType_t uxReturn = configMAX_PRIORITIES;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( pxTask == NULL )
+ {
+ uxReturn = uxTaskPriorityGet( pxTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) pxTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ uxReturn = uxTaskPriorityGet( xInternalTaskHandle );
+ }
+ }
+ }
+ }
+
+ return uxReturn;
+ }
+
+ #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+
+ eTaskState MPU_eTaskGetStateImpl( TaskHandle_t pxTask ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for eTaskGetState(). Returns eInvalid when the external
+ * index is out of range, access is denied, or no task is stored there. */
+ eTaskState MPU_eTaskGetStateImpl( TaskHandle_t pxTask ) /* PRIVILEGED_FUNCTION */
+ {
+ eTaskState eReturn = eInvalid;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ lIndex = ( int32_t ) pxTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ eReturn = eTaskGetState( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return eReturn;
+ }
+
+ #endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vTaskGetInfoImpl( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for vTaskGetInfo(). pxTaskStatus is an application-supplied
+ * output buffer, so its writability is checked before any kernel call;
+ * a NULL xTask queries the calling task. Fails silently (no write) if
+ * any check does not pass. */
+ void MPU_vTaskGetInfoImpl( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xIsTaskStatusWriteable = pdFALSE;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ xIsTaskStatusWriteable = xPortIsAuthorizedToAccessBuffer( pxTaskStatus,
+ sizeof( TaskStatus_t ),
+ tskMPU_WRITE_PERMISSION );
+
+ if( xIsTaskStatusWriteable == pdTRUE )
+ {
+ if( xTask == NULL )
+ {
+ vTaskGetInfo( xTask, pxTaskStatus, xGetFreeStackSpace, eState );
+ }
+ else
+ {
+ /* Translate the opaque external index into the internal handle. */
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskGetInfo( xInternalTaskHandle, pxTaskStatus, xGetFreeStackSpace, eState );
+ }
+ }
+ }
+ }
+ }
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandleImpl( void ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for xTaskGetIdleTaskHandle(). NOTE(review): the internal
+ * idle-task handle is returned as-is, not converted to an external
+ * index - the idle task is not registered in the kernel object pool. */
+ TaskHandle_t MPU_xTaskGetIdleTaskHandleImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xIdleTaskHandle = NULL;
+
+ xIdleTaskHandle = xTaskGetIdleTaskHandle();
+
+ return xIdleTaskHandle;
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskSuspendImpl( TaskHandle_t pxTaskToSuspend ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for vTaskSuspend(). A NULL handle always suspends the
+ * calling task; suspending ANOTHER task is restricted to privileged
+ * callers once the scheduler is running. */
+ void MPU_vTaskSuspendImpl( TaskHandle_t pxTaskToSuspend ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ if( pxTaskToSuspend == NULL )
+ {
+ vTaskSuspend( pxTaskToSuspend );
+ }
+ else
+ {
+ /* After the scheduler starts, only privileged tasks are allowed
+ * to suspend other tasks. */
+ #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+ if( ( xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED ) || ( portIS_TASK_PRIVILEGED() == pdTRUE ) )
+ #else
+ if( portIS_TASK_PRIVILEGED() == pdTRUE )
+ #endif
+ {
+ /* Translate the opaque external index into the internal handle. */
+ lIndex = ( int32_t ) pxTaskToSuspend;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskSuspend( xInternalTaskHandle );
+ }
+ }
+ }
+ }
+ }
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskResumeImpl( TaskHandle_t pxTaskToResume ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for vTaskResume(). Unlike suspension, resuming a task
+ * only requires the caller to hold access to that task's pool entry. */
+ void MPU_vTaskResumeImpl( TaskHandle_t pxTaskToResume ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ lIndex = ( int32_t ) pxTaskToResume;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskResume( xInternalTaskHandle );
+ }
+ }
+ }
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ TickType_t MPU_xTaskGetTickCountImpl( void ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for xTaskGetTickCount(). Pure read of kernel state; no
+ * object or buffer access checks are required. */
+ TickType_t MPU_xTaskGetTickCountImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ TickType_t xReturn;
+
+ xReturn = xTaskGetTickCount();
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasksImpl( void ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for uxTaskGetNumberOfTasks(). Pure read of kernel state;
+ * passed straight through without access checks. */
+ UBaseType_t MPU_uxTaskGetNumberOfTasksImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ UBaseType_t uxReturn;
+
+ uxReturn = uxTaskGetNumberOfTasks();
+
+ return uxReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ char * MPU_pcTaskGetNameImpl( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for pcTaskGetName(). NULL queries the calling task.
+ * NOTE(review): unlike most task wrappers here, no
+ * xPortIsAuthorizedToAccessKernelObject() check is performed - only the
+ * index validity and handle lookup; confirm this is intentional. */
+ char * MPU_pcTaskGetNameImpl( TaskHandle_t xTaskToQuery ) /* PRIVILEGED_FUNCTION */
+ {
+ char * pcReturn = NULL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( xTaskToQuery == NULL )
+ {
+ pcReturn = pcTaskGetName( xTaskToQuery );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTaskToQuery;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ pcReturn = pcTaskGetName( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return pcReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounterImpl( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for ulTaskGetRunTimeCounter(). NULL queries the calling
+ * task; returns 0 when validation or authorization fails. */
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounterImpl( const TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ configRUN_TIME_COUNTER_TYPE xReturn = 0;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ if( xTask == NULL )
+ {
+ xReturn = ulTaskGetRunTimeCounter( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = ulTaskGetRunTimeCounter( xInternalTaskHandle );
+ }
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercentImpl( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for ulTaskGetRunTimePercent(). NULL queries the calling
+ * task; returns 0 when validation or authorization fails. */
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercentImpl( const TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ configRUN_TIME_COUNTER_TYPE xReturn = 0;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ if( xTask == NULL )
+ {
+ xReturn = ulTaskGetRunTimePercent( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = ulTaskGetRunTimePercent( xInternalTaskHandle );
+ }
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercentImpl( void ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for ulTaskGetIdleRunTimePercent(). No task handle is
+ * involved, so no access checks are required. */
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercentImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ configRUN_TIME_COUNTER_TYPE xReturn;
+
+ xReturn = ulTaskGetIdleRunTimePercent();
+
+ return xReturn;
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounterImpl( void ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for ulTaskGetIdleRunTimeCounter(). No task handle is
+ * involved, so no access checks are required. */
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounterImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ configRUN_TIME_COUNTER_TYPE xReturn;
+
+ xReturn = ulTaskGetIdleRunTimeCounter();
+
+ return xReturn;
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ void MPU_vTaskSetApplicationTaskTagImpl( TaskHandle_t xTask,
+ TaskHookFunction_t pxTagValue ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for vTaskSetApplicationTaskTag(). NULL targets the
+ * calling task; otherwise the external index is validated, the caller's
+ * access is checked, and the internal handle is used. */
+ void MPU_vTaskSetApplicationTaskTagImpl( TaskHandle_t xTask,
+ TaskHookFunction_t pxTagValue ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xInternalTaskHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ if( xTask == NULL )
+ {
+ vTaskSetApplicationTaskTag( xTask, pxTagValue );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskSetApplicationTaskTag( xInternalTaskHandle, pxTagValue );
+ }
+ }
+ }
+ }
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTagImpl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for xTaskGetApplicationTaskTag(). NULL queries the
+ * calling task; returns NULL when validation or authorization fails. */
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTagImpl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHookFunction_t xReturn = NULL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ if( xTask == NULL )
+ {
+ xReturn = xTaskGetApplicationTaskTag( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = xTaskGetApplicationTaskTag( xInternalTaskHandle );
+ }
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void MPU_vTaskSetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) PRIVILEGED_FUNCTION;
+
+ /* MPU wrapper for vTaskSetThreadLocalStoragePointer(). NULL targets the
+ * calling task. Note xIndex is the TLS slot index and lIndex is the
+ * kernel-object-pool index recovered from the external handle - they are
+ * unrelated. */
+ void MPU_vTaskSetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ if( xTaskToSet == NULL )
+ {
+ vTaskSetThreadLocalStoragePointer( xTaskToSet, xIndex, pvValue );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTaskToSet;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskSetThreadLocalStoragePointer( xInternalTaskHandle, xIndex, pvValue );
+ }
+ }
+ }
+ }
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void * MPU_pvTaskGetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper for pvTaskGetThreadLocalStoragePointer(). Validates the opaque
+ * handle and the caller's access rights before querying; returns NULL when
+ * any validation step fails. */
+ void * MPU_pvTaskGetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* PRIVILEGED_FUNCTION */
+ {
+ void * pvReturn = NULL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ if( xTaskToQuery == NULL )
+ {
+ /* NULL is forwarded unchanged (denotes the calling task). */
+ pvReturn = pvTaskGetThreadLocalStoragePointer( xTaskToQuery, xIndex );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTaskToQuery;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ pvReturn = pvTaskGetThreadLocalStoragePointer( xInternalTaskHandle, xIndex );
+ }
+ }
+ }
+ }
+
+ return pvReturn;
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxTaskGetSystemStateImpl( TaskStatus_t * pxTaskStatusArray,
+ UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * pulTotalRunTime ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper for uxTaskGetSystemState(). Before calling through it verifies:
+ * 1. uxArraySize * sizeof( TaskStatus_t ) does not overflow 32 bits, and
+ * 2. the caller may write the full status array, and
+ * 3. if pulTotalRunTime is supplied, the caller may write it.
+ * Returns 0 (no tasks reported) if any check fails. */
+ UBaseType_t MPU_uxTaskGetSystemStateImpl( TaskStatus_t * pxTaskStatusArray,
+ UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * pulTotalRunTime ) /* PRIVILEGED_FUNCTION */
+ {
+ UBaseType_t uxReturn = 0;
+ UBaseType_t xIsTaskStatusArrayWriteable = pdFALSE;
+ UBaseType_t xIsTotalRunTimeWriteable = pdFALSE;
+ uint32_t ulArraySize = ( uint32_t ) uxArraySize;
+ uint32_t ulTaskStatusSize = ( uint32_t ) sizeof( TaskStatus_t );
+
+ /* Overflow check must come first so the byte count passed to the
+ * permission check below cannot wrap to a small value. */
+ if( mpuMULTIPLY_UINT32_WILL_OVERFLOW( ulTaskStatusSize, ulArraySize ) == 0 )
+ {
+ xIsTaskStatusArrayWriteable = xPortIsAuthorizedToAccessBuffer( pxTaskStatusArray,
+ ulTaskStatusSize * ulArraySize,
+ tskMPU_WRITE_PERMISSION );
+
+ if( pulTotalRunTime != NULL )
+ {
+ xIsTotalRunTimeWriteable = xPortIsAuthorizedToAccessBuffer( pulTotalRunTime,
+ sizeof( configRUN_TIME_COUNTER_TYPE ),
+ tskMPU_WRITE_PERMISSION );
+ }
+
+ if( ( xIsTaskStatusArrayWriteable == pdTRUE ) &&
+ ( ( pulTotalRunTime == NULL ) || ( xIsTotalRunTimeWriteable == pdTRUE ) ) )
+ {
+ uxReturn = uxTaskGetSystemState( pxTaskStatusArray, ( UBaseType_t ) ulArraySize, pulTotalRunTime );
+ }
+ }
+
+ return uxReturn;
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMarkImpl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper for uxTaskGetStackHighWaterMark(). Validates the opaque handle
+ * and the caller's access rights; returns 0 if any validation step fails. */
+ UBaseType_t MPU_uxTaskGetStackHighWaterMarkImpl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ UBaseType_t uxReturn = 0;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ if( xTask == NULL )
+ {
+ /* NULL is forwarded unchanged (denotes the calling task). */
+ uxReturn = uxTaskGetStackHighWaterMark( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ uxReturn = uxTaskGetStackHighWaterMark( xInternalTaskHandle );
+ }
+ }
+ }
+ }
+
+ return uxReturn;
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2Impl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper for uxTaskGetStackHighWaterMark2(). Same validation flow as
+ * the non-"2" variant above; returns 0 if any validation step fails. */
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2Impl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ configSTACK_DEPTH_TYPE uxReturn = 0;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ if( xTask == NULL )
+ {
+ /* NULL is forwarded unchanged (denotes the calling task). */
+ uxReturn = uxTaskGetStackHighWaterMark2( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ uxReturn = uxTaskGetStackHighWaterMark2( xInternalTaskHandle );
+ }
+ }
+ }
+ }
+
+ return uxReturn;
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandleImpl( void ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper for xTaskGetCurrentTaskHandle(). The internal handle of the
+ * running task is translated back into its opaque external form (its kernel
+ * object pool index); NULL is returned if there is no current task or the
+ * handle is not present in the pool. */
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandleImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xOpaqueHandle = NULL;
+ TaskHandle_t xCurrentHandle;
+ int32_t lKernelObjectIndex;
+
+ xCurrentHandle = xTaskGetCurrentTaskHandle();
+
+ if( xCurrentHandle != NULL )
+ {
+ lKernelObjectIndex = MPU_GetIndexForTaskHandle( xCurrentHandle );
+
+ if( lKernelObjectIndex != -1 )
+ {
+ xOpaqueHandle = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lKernelObjectIndex );
+ }
+ }
+
+ return xOpaqueHandle;
+ }
+
+ #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+ BaseType_t MPU_xTaskGetSchedulerStateImpl( void ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper for xTaskGetSchedulerState(). A straight pass-through: the
+ * scheduler state carries no kernel object handles and therefore needs no
+ * translation or access-control checks. */
+ BaseType_t MPU_xTaskGetSchedulerStateImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ return xTaskGetSchedulerState();
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ void MPU_vTaskSetTimeOutStateImpl( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper for vTaskSetTimeOutState(). The TimeOut_t structure lives in
+ * caller memory, so the caller's write permission to it is verified before
+ * the kernel writes into it; silently does nothing otherwise. */
+ void MPU_vTaskSetTimeOutStateImpl( TimeOut_t * const pxTimeOut ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xIsTimeOutWriteable = pdFALSE;
+
+ if( pxTimeOut != NULL )
+ {
+ xIsTimeOutWriteable = xPortIsAuthorizedToAccessBuffer( pxTimeOut,
+ sizeof( TimeOut_t ),
+ tskMPU_WRITE_PERMISSION );
+
+ if( xIsTimeOutWriteable == pdTRUE )
+ {
+ vTaskSetTimeOutState( pxTimeOut );
+ }
+ }
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xTaskCheckForTimeOutImpl( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper for xTaskCheckForTimeOut(). Both out-parameters live in caller
+ * memory and are updated by the kernel, so write permission to each is
+ * verified first. Returns pdFALSE if either pointer is NULL or a permission
+ * check fails. */
+ BaseType_t MPU_xTaskCheckForTimeOutImpl( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFALSE;
+ BaseType_t xIsTimeOutWriteable = pdFALSE;
+ BaseType_t xIsTicksToWaitWriteable = pdFALSE;
+
+ if( ( pxTimeOut != NULL ) && ( pxTicksToWait != NULL ) )
+ {
+ xIsTimeOutWriteable = xPortIsAuthorizedToAccessBuffer( pxTimeOut,
+ sizeof( TimeOut_t ),
+ tskMPU_WRITE_PERMISSION );
+ xIsTicksToWaitWriteable = xPortIsAuthorizedToAccessBuffer( pxTicksToWait,
+ sizeof( TickType_t ),
+ tskMPU_WRITE_PERMISSION );
+
+ if( ( xIsTimeOutWriteable == pdTRUE ) && ( xIsTicksToWaitWriteable == pdTRUE ) )
+ {
+ xReturn = xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait );
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Unprivileged-side shim for xTaskGenericNotify(). The five arguments are
+ * marshalled into a struct on the caller's stack and passed by pointer
+ * through the system-call entry, which dispatches to the Impl below. */
+ BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* FREERTOS_SYSTEM_CALL */
+ {
+ BaseType_t xReturn = pdFAIL;
+ xTaskGenericNotifyParams_t xParams;
+
+ xParams.xTaskToNotify = xTaskToNotify;
+ xParams.uxIndexToNotify = uxIndexToNotify;
+ xParams.ulValue = ulValue;
+ xParams.eAction = eAction;
+ xParams.pulPreviousNotificationValue = pulPreviousNotificationValue;
+
+ xReturn = MPU_xTaskGenericNotifyEntry( &( xParams ) );
+
+ return xReturn;
+ }
+
+ BaseType_t MPU_xTaskGenericNotifyImpl( const xTaskGenericNotifyParams_t * pxParams ) PRIVILEGED_FUNCTION;
+
+/* Privileged-side implementation. Validation order matters:
+ * 1. the parameter struct (caller memory) must be readable by the caller;
+ * 2. the notification index must be in range and eAction a known value;
+ * 3. the optional previous-value out-pointer must be caller-writeable;
+ * 4. the target's external handle must be valid and accessible to the caller.
+ * Returns pdFAIL if any step fails. */
+ BaseType_t MPU_xTaskGenericNotifyImpl( const xTaskGenericNotifyParams_t * pxParams ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xIsPreviousNotificationValueWriteable = pdFALSE;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+ BaseType_t xAreParamsReadable = pdFALSE;
+
+ if( pxParams != NULL )
+ {
+ xAreParamsReadable = xPortIsAuthorizedToAccessBuffer( pxParams,
+ sizeof( xTaskGenericNotifyParams_t ),
+ tskMPU_READ_PERMISSION );
+ }
+
+ if( xAreParamsReadable == pdTRUE )
+ {
+ if( ( pxParams->uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ) &&
+ ( ( pxParams->eAction == eNoAction ) ||
+ ( pxParams->eAction == eSetBits ) ||
+ ( pxParams->eAction == eIncrement ) ||
+ ( pxParams->eAction == eSetValueWithOverwrite ) ||
+ ( pxParams->eAction == eSetValueWithoutOverwrite ) ) )
+ {
+ if( pxParams->pulPreviousNotificationValue != NULL )
+ {
+ xIsPreviousNotificationValueWriteable = xPortIsAuthorizedToAccessBuffer( pxParams->pulPreviousNotificationValue,
+ sizeof( uint32_t ),
+ tskMPU_WRITE_PERMISSION );
+ }
+
+ if( ( pxParams->pulPreviousNotificationValue == NULL ) ||
+ ( xIsPreviousNotificationValueWriteable == pdTRUE ) )
+ {
+ lIndex = ( int32_t ) ( pxParams->xTaskToNotify );
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = xTaskGenericNotify( xInternalTaskHandle,
+ pxParams->uxIndexToNotify,
+ pxParams->ulValue,
+ pxParams->eAction,
+ pxParams->pulPreviousNotificationValue );
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Unprivileged-side shim for xTaskGenericNotifyWait(). Marshals the five
+ * arguments into a struct and dispatches through the system-call entry. */
+ BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait )
+ {
+ BaseType_t xReturn = pdFAIL;
+ xTaskGenericNotifyWaitParams_t xParams;
+
+ xParams.uxIndexToWaitOn = uxIndexToWaitOn;
+ xParams.ulBitsToClearOnEntry = ulBitsToClearOnEntry;
+ xParams.ulBitsToClearOnExit = ulBitsToClearOnExit;
+ xParams.pulNotificationValue = pulNotificationValue;
+ xParams.xTicksToWait = xTicksToWait;
+
+ xReturn = MPU_xTaskGenericNotifyWaitEntry( &( xParams ) );
+
+ return xReturn;
+ }
+
+ BaseType_t MPU_xTaskGenericNotifyWaitImpl( const xTaskGenericNotifyWaitParams_t * pxParams ) PRIVILEGED_FUNCTION;
+
+/* Privileged-side implementation. Verifies the parameter struct is readable,
+ * the notification index is in range, and the optional out-pointer is
+ * caller-writeable before waiting. Returns pdFAIL if any check fails. */
+ BaseType_t MPU_xTaskGenericNotifyWaitImpl( const xTaskGenericNotifyWaitParams_t * pxParams ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ BaseType_t xIsNotificationValueWritable = pdFALSE;
+ BaseType_t xAreParamsReadable = pdFALSE;
+
+ if( pxParams != NULL )
+ {
+ xAreParamsReadable = xPortIsAuthorizedToAccessBuffer( pxParams,
+ sizeof( xTaskGenericNotifyWaitParams_t ),
+ tskMPU_READ_PERMISSION );
+ }
+
+ if( xAreParamsReadable == pdTRUE )
+ {
+ if( pxParams->uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES )
+ {
+ if( pxParams->pulNotificationValue != NULL )
+ {
+ xIsNotificationValueWritable = xPortIsAuthorizedToAccessBuffer( pxParams->pulNotificationValue,
+ sizeof( uint32_t ),
+ tskMPU_WRITE_PERMISSION );
+ }
+
+ if( ( pxParams->pulNotificationValue == NULL ) ||
+ ( xIsNotificationValueWritable == pdTRUE ) )
+ {
+ xReturn = xTaskGenericNotifyWait( pxParams->uxIndexToWaitOn,
+ pxParams->ulBitsToClearOnEntry,
+ pxParams->ulBitsToClearOnExit,
+ pxParams->pulNotificationValue,
+ pxParams->xTicksToWait );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyTakeImpl( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper for ulTaskGenericNotifyTake(). Only the notification index
+ * needs validation (the call operates on the calling task itself); returns 0
+ * for an out-of-range index. */
+ uint32_t MPU_ulTaskGenericNotifyTakeImpl( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulReturn = 0;
+
+ if( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES )
+ {
+ ulReturn = ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait );
+ }
+
+ return ulReturn;
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyStateClearImpl( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper for xTaskGenericNotifyStateClear(). Validates the notification
+ * index, then the opaque handle and the caller's access rights; returns
+ * pdFAIL if any check fails. */
+ BaseType_t MPU_xTaskGenericNotifyStateClearImpl( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ if( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES )
+ {
+ if( xTask == NULL )
+ {
+ /* NULL is forwarded unchanged (denotes the calling task). */
+ xReturn = xTaskGenericNotifyStateClear( xTask, uxIndexToClear );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = xTaskGenericNotifyStateClear( xInternalTaskHandle, uxIndexToClear );
+ }
+ }
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyValueClearImpl( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper for ulTaskGenericNotifyValueClear(). Validates the
+ * notification index, then the opaque handle and the caller's access rights;
+ * returns 0 if any check fails. */
+ uint32_t MPU_ulTaskGenericNotifyValueClearImpl( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulReturn = 0;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xCallingTaskIsAuthorizedToAccessTask = pdFALSE;
+
+ if( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES )
+ {
+ if( xTask == NULL )
+ {
+ /* NULL is forwarded unchanged (denotes the calling task). */
+ ulReturn = ulTaskGenericNotifyValueClear( xTask, uxIndexToClear, ulBitsToClear );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessTask = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessTask == pdTRUE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ ulReturn = ulTaskGenericNotifyValueClear( xInternalTaskHandle, uxIndexToClear, ulBitsToClear );
+ }
+ }
+ }
+ }
+ }
+
+ return ulReturn;
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Privileged only wrappers for Task APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+
+/* Privileged-only wrapper for xTaskCreate(). Reserves a kernel object pool
+ * slot, creates the task, and hands the caller an opaque external handle
+ * (the pool index). Fails (pdFAIL) if the pool is full or the requested
+ * priority does not carry portPRIVILEGE_BIT — in the MPU port this API may
+ * only create privileged tasks. */
+ BaseType_t MPU_xTaskCreate( TaskFunction_t pvTaskCode,
+ const char * const pcName,
+ uint16_t usStackDepth,
+ void * pvParameters,
+ UBaseType_t uxPriority,
+ TaskHandle_t * pxCreatedTask ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ /* xTaskCreate() can only be used to create privileged tasks in MPU port. */
+ if( ( uxPriority & portPRIVILEGE_BIT ) != 0 )
+ {
+ xReturn = xTaskCreate( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, &( xInternalTaskHandle ) );
+
+ if( ( xReturn == pdPASS ) && ( xInternalTaskHandle != NULL ) )
+ {
+ MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle );
+
+ if( pxCreatedTask != NULL )
+ {
+ *pxCreatedTask = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ }
+ else
+ {
+ /* Creation failed — return the reserved slot to the pool. */
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+/* Privileged-only wrapper for xTaskCreateStatic(). Reserves a kernel object
+ * pool slot, creates the task, and returns the opaque external handle (the
+ * pool index), or NULL if the pool is full or creation fails. */
+ TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode,
+ const char * const pcName,
+ const uint32_t ulStackDepth,
+ void * const pvParameters,
+ UBaseType_t uxPriority,
+ StackType_t * const puxStackBuffer,
+ StaticTask_t * const pxTaskBuffer ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xExternalTaskHandle = NULL;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ xInternalTaskHandle = xTaskCreateStatic( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle );
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ {
+ /* By default, an unprivileged task has access to itself. */
+ if( ( uxPriority & portPRIVILEGE_BIT ) == 0 )
+ {
+ vPortGrantAccessToKernelObject( xInternalTaskHandle, lIndex );
+ }
+ }
+ #endif
+
+ xExternalTaskHandle = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ else
+ {
+ /* Creation failed — return the reserved slot to the pool. */
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+
+ return xExternalTaskHandle;
+ }
+
+ #endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelete == 1 )
+
+/* Privileged-only wrapper for vTaskDelete(). Translates the opaque external
+ * handle (a kernel object pool index) to the internal handle, releases the
+ * pool slot, and deletes the task.
+ *
+ * The pool slot is freed BEFORE vTaskDelete() is called: when a task deletes
+ * itself (pxTaskToDelete == NULL, or the caller passes its own handle)
+ * vTaskDelete() does not return to this function, so freeing afterwards
+ * would leak the slot permanently. */
+ void MPU_vTaskDelete( TaskHandle_t pxTaskToDelete ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xInternalTaskHandle = NULL;
+ int32_t lIndex;
+
+ if( pxTaskToDelete == NULL )
+ {
+ /* NULL denotes the calling task — resolve it so its pool slot can
+ * be located and released. */
+ xInternalTaskHandle = xTaskGetCurrentTaskHandle();
+ lIndex = MPU_GetIndexForTaskHandle( xInternalTaskHandle );
+
+ if( lIndex != -1 )
+ {
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+
+ /* Does not return — the calling task is deleted. */
+ vTaskDelete( xInternalTaskHandle );
+ }
+ else
+ {
+ lIndex = ( int32_t ) pxTaskToDelete;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ /* Free the slot first — the target may be the calling task,
+ * in which case vTaskDelete() does not return. */
+ MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+ vTaskDelete( xInternalTaskHandle );
+ }
+ }
+ }
+ }
+
+ #endif /* #if ( INCLUDE_vTaskDelete == 1 ) */
+/*-----------------------------------------------------------*/
+
+
+ #if ( INCLUDE_vTaskPrioritySet == 1 )
+
+/* Privileged-only wrapper for vTaskPrioritySet(). Translates the opaque
+ * external handle to the internal one; silently does nothing if the handle
+ * does not resolve. No ACL check here — this wrapper is reachable from
+ * privileged code only. */
+ void MPU_vTaskPrioritySet( TaskHandle_t pxTask,
+ UBaseType_t uxNewPriority ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xInternalTaskHandle = NULL;
+ int32_t lIndex;
+
+ if( pxTask == NULL )
+ {
+ /* NULL is forwarded unchanged (denotes the calling task). */
+ vTaskPrioritySet( pxTask, uxNewPriority );
+ }
+ else
+ {
+ lIndex = ( int32_t ) pxTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskPrioritySet( xInternalTaskHandle, uxNewPriority );
+ }
+ }
+ }
+ }
+
+ #endif /* if ( INCLUDE_vTaskPrioritySet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetHandle == 1 )
+
+/* Privileged-only wrapper for xTaskGetHandle(). Looks the task up by name,
+ * then converts the internal handle into its opaque external form (the
+ * kernel object pool index). Returns NULL when no task has the given name
+ * or the handle is not present in the pool. */
+ TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xOpaqueHandle = NULL;
+ TaskHandle_t xFoundHandle;
+ int32_t lKernelObjectIndex;
+
+ xFoundHandle = xTaskGetHandle( pcNameToQuery );
+
+ if( xFoundHandle != NULL )
+ {
+ lKernelObjectIndex = MPU_GetIndexForTaskHandle( xFoundHandle );
+
+ if( lKernelObjectIndex != -1 )
+ {
+ xOpaqueHandle = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lKernelObjectIndex );
+ }
+ }
+
+ return xOpaqueHandle;
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+/* Privileged-only wrapper for xTaskCallApplicationTaskHook(). Translates the
+ * opaque external handle to the internal one; returns pdFAIL if the handle
+ * does not resolve. */
+ BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask,
+ void * pvParameter ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( xTask == NULL )
+ {
+ /* NULL is forwarded unchanged (denotes the calling task). */
+ xReturn = xTaskCallApplicationTaskHook( xTask, pvParameter );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = xTaskCallApplicationTaskHook( xInternalTaskHandle, pvParameter );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+
+/* Privileged-only wrapper for xTaskCreateRestricted(). Reserves a kernel
+ * object pool slot, creates the restricted task, and writes the opaque
+ * external handle (the pool index) into *pxCreatedTask when requested.
+ * Returns pdFAIL if the pool is full or creation fails. */
+ BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
+ TaskHandle_t * pxCreatedTask ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ xReturn = xTaskCreateRestricted( pxTaskDefinition, &( xInternalTaskHandle ) );
+
+ if( ( xReturn == pdPASS ) && ( xInternalTaskHandle != NULL ) )
+ {
+ MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle );
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ {
+ /* By default, an unprivileged task has access to itself. */
+ if( ( pxTaskDefinition->uxPriority & portPRIVILEGE_BIT ) == 0 )
+ {
+ vPortGrantAccessToKernelObject( xInternalTaskHandle, lIndex );
+ }
+ }
+ #endif
+
+ if( pxCreatedTask != NULL )
+ {
+ *pxCreatedTask = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ }
+ else
+ {
+ /* Creation failed — return the reserved slot to the pool. */
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+/* Privileged-only wrapper for xTaskCreateRestrictedStatic(). Identical flow
+ * to MPU_xTaskCreateRestricted() but with statically allocated task storage:
+ * reserve a pool slot, create, record the handle, emit the external index. */
+ BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition,
+ TaskHandle_t * pxCreatedTask ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ xReturn = xTaskCreateRestrictedStatic( pxTaskDefinition, &( xInternalTaskHandle ) );
+
+ if( ( xReturn == pdPASS ) && ( xInternalTaskHandle != NULL ) )
+ {
+ MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle );
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ {
+ /* By default, an unprivileged task has access to itself. */
+ if( ( pxTaskDefinition->uxPriority & portPRIVILEGE_BIT ) == 0 )
+ {
+ vPortGrantAccessToKernelObject( xInternalTaskHandle, lIndex );
+ }
+ }
+ #endif
+
+ if( pxCreatedTask != NULL )
+ {
+ *pxCreatedTask = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ }
+ else
+ {
+ /* Creation failed — return the reserved slot to the pool. */
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+/* Privileged-only wrapper for vTaskAllocateMPURegions(). Translates the
+ * opaque external handle to the internal one; silently does nothing if the
+ * handle does not resolve. */
+ void MPU_vTaskAllocateMPURegions( TaskHandle_t xTaskToModify,
+ const MemoryRegion_t * const xRegions ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xInternalTaskHandle = NULL;
+ int32_t lIndex;
+
+ if( xTaskToModify == NULL )
+ {
+ /* NULL is forwarded unchanged (denotes the calling task). */
+ vTaskAllocateMPURegions( xTaskToModify, xRegions );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTaskToModify;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskAllocateMPURegions( xInternalTaskHandle, xRegions );
+ }
+ }
+ }
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+/* Privileged-only wrapper for xTaskGetStaticBuffers(). A NULL handle is
+ * resolved to the calling task's internal handle explicitly (rather than
+ * forwarded); otherwise the opaque external handle is translated. Returns
+ * pdFALSE if the handle does not resolve. */
+ BaseType_t MPU_xTaskGetStaticBuffers( TaskHandle_t xTask,
+ StackType_t ** ppuxStackBuffer,
+ StaticTask_t ** ppxTaskBuffer ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xInternalTaskHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xReturn = pdFALSE;
+
+ if( xTask == NULL )
+ {
+ xInternalTaskHandle = xTaskGetCurrentTaskHandle();
+ xReturn = xTaskGetStaticBuffers( xInternalTaskHandle, ppuxStackBuffer, ppxTaskBuffer );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = xTaskGetStaticBuffers( xInternalTaskHandle, ppuxStackBuffer, ppxTaskBuffer );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+/* ISR-safe wrapper for uxTaskPriorityGetFromISR(). Translates the opaque
+ * external handle; returns configMAX_PRIORITIES (an out-of-range priority)
+ * if the handle does not resolve, distinguishing failure from any valid
+ * priority value. */
+ UBaseType_t MPU_uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ UBaseType_t uxReturn = configMAX_PRIORITIES;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( xTask == NULL )
+ {
+ /* NULL is forwarded unchanged (denotes the calling task). */
+ uxReturn = uxTaskPriorityGetFromISR( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ uxReturn = uxTaskPriorityGetFromISR( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return uxReturn;
+ }
+
+ #endif /* #if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
+
+/* ISR-safe wrapper for xTaskResumeFromISR(). Translates the opaque external
+ * handle; returns pdFAIL if the handle does not resolve. NULL is not treated
+ * specially here — an ISR cannot resume "itself". */
+ BaseType_t MPU_xTaskResumeFromISR( TaskHandle_t xTaskToResume ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ lIndex = ( int32_t ) xTaskToResume;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = xTaskResumeFromISR( xInternalTaskHandle );
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )*/
+/*---------------------------------------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+/* ISR-safe wrapper for xTaskGetApplicationTaskTagFromISR(). Translates the
+ * opaque external handle; returns NULL if the handle does not resolve. */
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHookFunction_t xReturn = NULL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( xTask == NULL )
+ {
+ /* NULL is forwarded unchanged. */
+ xReturn = xTaskGetApplicationTaskTagFromISR( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = xTaskGetApplicationTaskTagFromISR( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* #if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*---------------------------------------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* ISR-safe wrapper for xTaskGenericNotifyFromISR(). Translates the opaque
+ * external handle of the task to notify; returns pdFAIL if the handle does
+ * not resolve. Remaining arguments are forwarded unchanged. */
+ BaseType_t MPU_xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue,
+ BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ lIndex = ( int32_t ) xTaskToNotify;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = xTaskGenericNotifyFromISR( xInternalTaskHandle, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken );
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* #if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*---------------------------------------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* ISR-safe wrapper for vTaskGenericNotifyGiveFromISR(). Translates the
+ * opaque external handle of the task to notify; silently does nothing if
+ * the handle does not resolve. */
+ void MPU_vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ lIndex = ( int32_t ) xTaskToNotify;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskGenericNotifyGiveFromISR( xInternalTaskHandle, uxIndexToNotify, pxHigherPriorityTaskWoken );
+ }
+ }
+ }
+ #endif /*#if ( configUSE_TASK_NOTIFICATIONS == 1 )*/
+/*-----------------------------------------------------------*/
+
+/*-----------------------------------------------------------*/
+/* MPU wrappers for queue APIs. */
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueGenericSendImpl( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper for xQueueGenericSend(). After resolving and authorizing the
+ * opaque queue handle it rejects, before calling through:
+ * - a NULL item pointer when the queue's item size is non-zero;
+ * - queueOVERWRITE on a queue whose length is not exactly 1;
+ * - a blocking send (xTicksToWait != 0) while the scheduler is suspended;
+ * - an item buffer the caller is not allowed to read.
+ * Returns pdFAIL if any check fails. */
+ BaseType_t MPU_xQueueGenericSendImpl( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ BaseType_t xCopyPosition ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ QueueHandle_t xInternalQueueHandle = NULL;
+ BaseType_t xReturn = pdFAIL;
+ BaseType_t xIsItemToQueueReadable = pdFALSE;
+ BaseType_t xCallingTaskIsAuthorizedToAccessQueue = pdFALSE;
+ UBaseType_t uxQueueItemSize, uxQueueLength;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessQueue = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xCallingTaskIsAuthorizedToAccessQueue == pdTRUE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ uxQueueItemSize = uxQueueGetQueueItemSize( xInternalQueueHandle );
+ uxQueueLength = uxQueueGetQueueLength( xInternalQueueHandle );
+
+ if( ( !( ( pvItemToQueue == NULL ) && ( uxQueueItemSize != ( UBaseType_t ) 0U ) ) ) &&
+ ( !( ( xCopyPosition == queueOVERWRITE ) && ( uxQueueLength != ( UBaseType_t ) 1U ) ) )
+ #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
+ && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) )
+ #endif
+ )
+ {
+ if( pvItemToQueue != NULL )
+ {
+ xIsItemToQueueReadable = xPortIsAuthorizedToAccessBuffer( pvItemToQueue,
+ uxQueueGetQueueItemSize( xInternalQueueHandle ),
+ tskMPU_READ_PERMISSION );
+ }
+
+ if( ( pvItemToQueue == NULL ) || ( xIsItemToQueueReadable == pdTRUE ) )
+ {
+ xReturn = xQueueGenericSend( xInternalQueueHandle, pvItemToQueue, xTicksToWait, xCopyPosition );
+ }
+ }
+ }
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+    UBaseType_t MPU_uxQueueMessagesWaitingImpl( const QueueHandle_t pxQueue ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for uxQueueMessagesWaiting().
+ * Validate the opaque index-based handle, verify the caller's access
+ * permission, translate to the real handle and forward. Returns 0 if any
+ * validation step is refused. */
+    UBaseType_t MPU_uxQueueMessagesWaitingImpl( const QueueHandle_t pxQueue ) /* PRIVILEGED_FUNCTION */
+    {
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+        UBaseType_t uxReturn = 0;
+        BaseType_t xCallingTaskIsAuthorizedToAccessQueue = pdFALSE;
+
+        lIndex = ( int32_t ) pxQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xCallingTaskIsAuthorizedToAccessQueue = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xCallingTaskIsAuthorizedToAccessQueue == pdTRUE )
+            {
+                xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    uxReturn = uxQueueMessagesWaiting( xInternalQueueHandle );
+                }
+            }
+        }
+
+        return uxReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    UBaseType_t MPU_uxQueueSpacesAvailableImpl( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for uxQueueSpacesAvailable().
+ * Validate the opaque index-based handle, verify the caller's access
+ * permission, translate to the real handle and forward. Returns 0 if any
+ * validation step is refused. */
+    UBaseType_t MPU_uxQueueSpacesAvailableImpl( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+    {
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+        UBaseType_t uxReturn = 0;
+        BaseType_t xCallingTaskIsAuthorizedToAccessQueue = pdFALSE;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xCallingTaskIsAuthorizedToAccessQueue = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xCallingTaskIsAuthorizedToAccessQueue == pdTRUE )
+            {
+                xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    uxReturn = uxQueueSpacesAvailable( xInternalQueueHandle );
+                }
+            }
+        }
+
+        return uxReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xQueueReceiveImpl( QueueHandle_t pxQueue,
+                                      void * const pvBuffer,
+                                      TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for xQueueReceive().
+ * Validate the opaque index-based handle, verify the caller's access
+ * permission, translate to the real handle, check the caller's write access
+ * to the receive buffer, then forward. Returns pdFAIL if any validation
+ * step is refused. */
+    BaseType_t MPU_xQueueReceiveImpl( QueueHandle_t pxQueue,
+                                      void * const pvBuffer,
+                                      TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+        BaseType_t xReturn = pdFAIL;
+        BaseType_t xIsReceiveBufferWritable = pdFALSE;
+        BaseType_t xCallingTaskIsAuthorizedToAccessQueue = pdFALSE;
+        UBaseType_t uxQueueItemSize;
+
+        lIndex = ( int32_t ) pxQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xCallingTaskIsAuthorizedToAccessQueue = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xCallingTaskIsAuthorizedToAccessQueue == pdTRUE )
+            {
+                xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    uxQueueItemSize = uxQueueGetQueueItemSize( xInternalQueueHandle );
+
+                    /* Reject: NULL buffer on a queue that copies data, or a blocking
+                     * call while the scheduler is suspended. */
+                    if( ( !( ( ( pvBuffer ) == NULL ) && ( uxQueueItemSize != ( UBaseType_t ) 0U ) ) )
+                        #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
+                            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) )
+                        #endif
+                        )
+                    {
+                        /* Reuse the item size queried above instead of asking the
+                         * queue again. */
+                        xIsReceiveBufferWritable = xPortIsAuthorizedToAccessBuffer( pvBuffer,
+                                                                                   uxQueueItemSize,
+                                                                                   tskMPU_WRITE_PERMISSION );
+
+                        if( xIsReceiveBufferWritable == pdTRUE )
+                        {
+                            xReturn = xQueueReceive( xInternalQueueHandle, pvBuffer, xTicksToWait );
+                        }
+                    }
+                }
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xQueuePeekImpl( QueueHandle_t xQueue,
+                                   void * const pvBuffer,
+                                   TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for xQueuePeek().
+ * Same validation sequence as MPU_xQueueReceiveImpl (index, authorization,
+ * handle lookup, buffer write permission), but the item is not removed from
+ * the queue. Returns pdFAIL if any validation step is refused. */
+    BaseType_t MPU_xQueuePeekImpl( QueueHandle_t xQueue,
+                                   void * const pvBuffer,
+                                   TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+        BaseType_t xReturn = pdFAIL;
+        BaseType_t xIsReceiveBufferWritable = pdFALSE;
+        UBaseType_t uxQueueItemSize;
+        BaseType_t xCallingTaskIsAuthorizedToAccessQueue = pdFALSE;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xCallingTaskIsAuthorizedToAccessQueue = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xCallingTaskIsAuthorizedToAccessQueue == pdTRUE )
+            {
+                xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    uxQueueItemSize = uxQueueGetQueueItemSize( xInternalQueueHandle );
+
+                    /* Reject: NULL buffer on a queue that copies data, or a blocking
+                     * call while the scheduler is suspended. */
+                    if( ( !( ( ( pvBuffer ) == NULL ) && ( uxQueueItemSize != ( UBaseType_t ) 0U ) ) )
+                        #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
+                            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) )
+                        #endif
+                        )
+                    {
+                        /* Reuse the item size queried above instead of asking the
+                         * queue again. */
+                        xIsReceiveBufferWritable = xPortIsAuthorizedToAccessBuffer( pvBuffer,
+                                                                                   uxQueueItemSize,
+                                                                                   tskMPU_WRITE_PERMISSION );
+
+                        if( xIsReceiveBufferWritable == pdTRUE )
+                        {
+                            xReturn = xQueuePeek( xInternalQueueHandle, pvBuffer, xTicksToWait );
+                        }
+                    }
+                }
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xQueueSemaphoreTakeImpl( QueueHandle_t xQueue,
+                                            TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for xQueueSemaphoreTake().
+ * Validate the opaque index-based handle, verify the caller's access
+ * permission, translate to the real handle and forward. The item size must
+ * be zero - i.e. the object must actually be a semaphore/mutex, not a data
+ * queue. Returns pdFAIL if any validation step is refused. */
+    BaseType_t MPU_xQueueSemaphoreTakeImpl( QueueHandle_t xQueue,
+                                            TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+        BaseType_t xReturn = pdFAIL;
+        UBaseType_t uxQueueItemSize;
+        BaseType_t xCallingTaskIsAuthorizedToAccessQueue = pdFALSE;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xCallingTaskIsAuthorizedToAccessQueue = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xCallingTaskIsAuthorizedToAccessQueue == pdTRUE )
+            {
+                xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    uxQueueItemSize = uxQueueGetQueueItemSize( xInternalQueueHandle );
+
+                    /* Semaphores have zero item size; also reject a blocking call
+                     * while the scheduler is suspended. */
+                    if( ( uxQueueItemSize == 0 )
+                        #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
+                            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) )
+                        #endif
+                        )
+                    {
+                        xReturn = xQueueSemaphoreTake( xInternalQueueHandle, xTicksToWait );
+                    }
+                }
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+        TaskHandle_t MPU_xQueueGetMutexHolderImpl( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for xQueueGetMutexHolder().
+ * Validate/authorize the opaque mutex handle, query the real holder task
+ * handle, then translate the holder back into an opaque pool index so the
+ * raw kernel task handle is never exposed to the application. Returns NULL
+ * if any step fails or the mutex is not held. */
+        TaskHandle_t MPU_xQueueGetMutexHolderImpl( QueueHandle_t xSemaphore ) /* PRIVILEGED_FUNCTION */
+        {
+            TaskHandle_t xMutexHolderTaskInternalHandle = NULL;
+            TaskHandle_t xMutexHolderTaskExternalHandle = NULL;
+            int32_t lIndex, lMutexHolderTaskIndex;
+            QueueHandle_t xInternalQueueHandle = NULL;
+            BaseType_t xCallingTaskIsAuthorizedToAccessQueue = pdFALSE;
+
+
+            lIndex = ( int32_t ) xSemaphore;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessQueue = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessQueue == pdTRUE )
+                {
+                    xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalQueueHandle != NULL )
+                    {
+                        xMutexHolderTaskInternalHandle = xQueueGetMutexHolder( xInternalQueueHandle );
+
+                        if( xMutexHolderTaskInternalHandle != NULL )
+                        {
+                            /* -1 means the holder task is not tracked in the pool. */
+                            lMutexHolderTaskIndex = MPU_GetIndexForTaskHandle( xMutexHolderTaskInternalHandle );
+
+                            if( lMutexHolderTaskIndex != -1 )
+                            {
+                                xMutexHolderTaskExternalHandle = ( TaskHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lMutexHolderTaskIndex ) );
+                            }
+                        }
+                    }
+                }
+            }
+
+            return xMutexHolderTaskExternalHandle;
+        }
+
+    #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+        BaseType_t MPU_xQueueTakeMutexRecursiveImpl( QueueHandle_t xMutex,
+                                                     TickType_t xBlockTime ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for xQueueTakeMutexRecursive().
+ * Validate/authorize the opaque handle and require a zero item size (the
+ * object must be a mutex, not a data queue) before forwarding. Returns
+ * pdFAIL if any validation step is refused. */
+        BaseType_t MPU_xQueueTakeMutexRecursiveImpl( QueueHandle_t xMutex,
+                                                     TickType_t xBlockTime ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            BaseType_t xCallingTaskIsAuthorizedToAccessQueue = pdFALSE;
+            int32_t lIndex;
+            QueueHandle_t xInternalQueueHandle = NULL;
+            UBaseType_t uxQueueItemSize;
+
+            lIndex = ( int32_t ) xMutex;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessQueue = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessQueue == pdTRUE )
+                {
+                    xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalQueueHandle != NULL )
+                    {
+                        uxQueueItemSize = uxQueueGetQueueItemSize( xInternalQueueHandle );
+
+                        /* Mutexes have zero item size. */
+                        if( uxQueueItemSize == 0 )
+                        {
+                            xReturn = xQueueTakeMutexRecursive( xInternalQueueHandle, xBlockTime );
+                        }
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+        BaseType_t MPU_xQueueGiveMutexRecursiveImpl( QueueHandle_t xMutex ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for xQueueGiveMutexRecursive().
+ * Validate/authorize the opaque handle, translate to the real handle and
+ * forward. Returns pdFAIL if any validation step is refused. */
+        BaseType_t MPU_xQueueGiveMutexRecursiveImpl( QueueHandle_t xMutex ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            BaseType_t xCallingTaskIsAuthorizedToAccessQueue = pdFALSE;
+            int32_t lIndex;
+            QueueHandle_t xInternalQueueHandle = NULL;
+
+            lIndex = ( int32_t ) xMutex;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessQueue = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessQueue == pdTRUE )
+                {
+                    xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalQueueHandle != NULL )
+                    {
+                        xReturn = xQueueGiveMutexRecursive( xInternalQueueHandle );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_QUEUE_SETS == 1 )
+
+        QueueSetMemberHandle_t MPU_xQueueSelectFromSetImpl( QueueSetHandle_t xQueueSet,
+                                                            TickType_t xBlockTimeTicks ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for xQueueSelectFromSet().
+ * Validate/authorize the opaque queue-set handle, forward the call, then
+ * translate the selected member's real handle back into an opaque pool
+ * index before returning it to the application. Returns NULL on failure
+ * or timeout. */
+        QueueSetMemberHandle_t MPU_xQueueSelectFromSetImpl( QueueSetHandle_t xQueueSet,
+                                                            TickType_t xBlockTimeTicks ) /* PRIVILEGED_FUNCTION */
+        {
+            QueueSetHandle_t xInternalQueueSetHandle = NULL;
+            QueueSetMemberHandle_t xSelectedMemberInternal = NULL;
+            QueueSetMemberHandle_t xSelectedMemberExternal = NULL;
+            int32_t lIndexQueueSet, lIndexSelectedMember;
+            BaseType_t xCallingTaskIsAuthorizedToAccessQueueSet = pdFALSE;
+
+            lIndexQueueSet = ( int32_t ) xQueueSet;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessQueueSet = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) );
+
+                if( xCallingTaskIsAuthorizedToAccessQueueSet == pdTRUE )
+                {
+                    xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) );
+
+                    if( xInternalQueueSetHandle != NULL )
+                    {
+                        xSelectedMemberInternal = xQueueSelectFromSet( xInternalQueueSetHandle, xBlockTimeTicks );
+
+                        if( xSelectedMemberInternal != NULL )
+                        {
+                            /* -1 means the member is not tracked in the pool. */
+                            lIndexSelectedMember = MPU_GetIndexForQueueSetMemberHandle( xSelectedMemberInternal );
+
+                            if( lIndexSelectedMember != -1 )
+                            {
+                                xSelectedMemberExternal = ( QueueSetMemberHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lIndexSelectedMember ) );
+                            }
+                        }
+                    }
+                }
+            }
+
+            return xSelectedMemberExternal;
+        }
+
+    #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_QUEUE_SETS == 1 )
+
+        BaseType_t MPU_xQueueAddToSetImpl( QueueSetMemberHandle_t xQueueOrSemaphore,
+                                           QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for xQueueAddToSet().
+ * Both opaque handles (the set and the member) are independently index
+ * validated and authorization checked before being translated and
+ * forwarded. Returns pdFAIL if any validation step is refused. */
+        BaseType_t MPU_xQueueAddToSetImpl( QueueSetMemberHandle_t xQueueOrSemaphore,
+                                           QueueSetHandle_t xQueueSet ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            QueueSetMemberHandle_t xInternalQueueSetMemberHandle = NULL;
+            QueueSetHandle_t xInternalQueueSetHandle = NULL;
+            int32_t lIndexQueueSet, lIndexQueueSetMember;
+            BaseType_t xCallingTaskIsAuthorizedToAccessQueueSet = pdFALSE;
+            BaseType_t xCallingTaskIsAuthorizedToAccessQueueSetMember = pdFALSE;
+
+            lIndexQueueSet = ( int32_t ) xQueueSet;
+            lIndexQueueSetMember = ( int32_t ) xQueueOrSemaphore;
+
+            if( ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE ) &&
+                ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSetMember ) != pdFALSE ) )
+            {
+                xCallingTaskIsAuthorizedToAccessQueueSet = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) );
+                xCallingTaskIsAuthorizedToAccessQueueSetMember = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSetMember ) );
+
+                if( ( xCallingTaskIsAuthorizedToAccessQueueSet == pdTRUE ) && ( xCallingTaskIsAuthorizedToAccessQueueSetMember == pdTRUE ) )
+                {
+                    xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) );
+                    xInternalQueueSetMemberHandle = MPU_GetQueueSetMemberHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSetMember ) );
+
+                    if( ( xInternalQueueSetHandle != NULL ) && ( xInternalQueueSetMemberHandle != NULL ) )
+                    {
+                        xReturn = xQueueAddToSet( xInternalQueueSetMemberHandle, xInternalQueueSetHandle );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if configQUEUE_REGISTRY_SIZE > 0
+
+        void MPU_vQueueAddToRegistryImpl( QueueHandle_t xQueue,
+                                          const char * pcName ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for vQueueAddToRegistry().
+ * Validate/authorize the opaque handle, translate and forward.
+ * NOTE(review): pcName is passed through without a buffer access check -
+ * confirm the registry only stores the pointer, not the string contents. */
+        void MPU_vQueueAddToRegistryImpl( QueueHandle_t xQueue,
+                                          const char * pcName ) /* PRIVILEGED_FUNCTION */
+        {
+            int32_t lIndex;
+            QueueHandle_t xInternalQueueHandle = NULL;
+            BaseType_t xCallingTaskIsAuthorizedToAccessQueue = pdFALSE;
+
+            lIndex = ( int32_t ) xQueue;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessQueue = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessQueue == pdTRUE )
+                {
+                    xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalQueueHandle != NULL )
+                    {
+                        vQueueAddToRegistry( xInternalQueueHandle, pcName );
+                    }
+                }
+            }
+        }
+
+    #endif /* if configQUEUE_REGISTRY_SIZE > 0 */
+/*-----------------------------------------------------------*/
+
+    #if configQUEUE_REGISTRY_SIZE > 0
+
+        void MPU_vQueueUnregisterQueueImpl( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for vQueueUnregisterQueue().
+ * Validate/authorize the opaque handle, translate and forward. */
+        void MPU_vQueueUnregisterQueueImpl( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+        {
+            int32_t lIndex;
+            QueueHandle_t xInternalQueueHandle = NULL;
+            BaseType_t xCallingTaskIsAuthorizedToAccessQueue = pdFALSE;
+
+            lIndex = ( int32_t ) xQueue;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessQueue = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessQueue == pdTRUE )
+                {
+                    xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalQueueHandle != NULL )
+                    {
+                        vQueueUnregisterQueue( xInternalQueueHandle );
+                    }
+                }
+            }
+        }
+
+    #endif /* if configQUEUE_REGISTRY_SIZE > 0 */
+/*-----------------------------------------------------------*/
+
+    #if configQUEUE_REGISTRY_SIZE > 0
+
+        const char * MPU_pcQueueGetNameImpl( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+/* MPU wrapper (system-call implementation) for pcQueueGetName().
+ * Validate/authorize the opaque handle, translate and forward. Returns
+ * NULL if any validation step is refused. */
+        const char * MPU_pcQueueGetNameImpl( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+        {
+            const char * pcReturn = NULL;
+            QueueHandle_t xInternalQueueHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xCallingTaskIsAuthorizedToAccessQueue = pdFALSE;
+
+            lIndex = ( int32_t ) xQueue;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessQueue = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessQueue == pdTRUE )
+                {
+                    xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalQueueHandle != NULL )
+                    {
+                        pcReturn = pcQueueGetName( xInternalQueueHandle );
+                    }
+                }
+            }
+
+            return pcReturn;
+        }
+
+    #endif /* if configQUEUE_REGISTRY_SIZE > 0 */
+/*-----------------------------------------------------------*/
+
+/* Privileged only wrappers for Queue APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+/*-----------------------------------------------------------*/
+
+/* Privileged-only wrapper for vQueueDelete(). Translates the opaque
+ * index-based handle, deletes the kernel object, then releases the pool
+ * slot so the index can be reused. No authorization check is performed
+ * here - per the section banner these wrappers exist only so privileged
+ * code can use the opaque handles. */
+    void MPU_vQueueDelete( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+    {
+        QueueHandle_t xInternalQueueHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                vQueueDelete( xInternalQueueHandle );
+                /* Free the pool slot only after the kernel object is gone. */
+                MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+            }
+        }
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
+
+/* Privileged-only wrapper for xQueueCreateMutex(). Reserves a free slot in
+ * the kernel object pool, creates the mutex, stores the real handle in the
+ * slot and hands the slot index back as an opaque handle. The slot is
+ * released again if the kernel-side create fails. Returns NULL on failure. */
+        QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) /* PRIVILEGED_FUNCTION */
+        {
+            QueueHandle_t xInternalQueueHandle = NULL;
+            QueueHandle_t xExternalQueueHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalQueueHandle = xQueueCreateMutex( ucQueueType );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle );
+                    xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    /* Creation failed - return the reserved slot to the pool. */
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalQueueHandle;
+        }
+
+    #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
+
+/* Privileged-only wrapper for xQueueCreateMutexStatic(). Same pool-slot
+ * scheme as MPU_xQueueCreateMutex(), but the queue storage is supplied by
+ * the caller. Returns NULL on failure. */
+        QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType,
+                                                   StaticQueue_t * pxStaticQueue ) /* PRIVILEGED_FUNCTION */
+        {
+            QueueHandle_t xInternalQueueHandle = NULL;
+            QueueHandle_t xExternalQueueHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalQueueHandle = xQueueCreateMutexStatic( ucQueueType, pxStaticQueue );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle );
+                    xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    /* Creation failed - return the reserved slot to the pool. */
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalQueueHandle;
+        }
+
+    #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
+
+/* Privileged-only wrapper for xQueueCreateCountingSemaphore(). Reserves a
+ * pool slot, creates the semaphore, stores the real handle and returns the
+ * slot index as an opaque handle; frees the slot on failure. */
+        QueueHandle_t MPU_xQueueCreateCountingSemaphore( UBaseType_t uxCountValue,
+                                                         UBaseType_t uxInitialCount ) /* PRIVILEGED_FUNCTION */
+        {
+            QueueHandle_t xInternalQueueHandle = NULL;
+            QueueHandle_t xExternalQueueHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalQueueHandle = xQueueCreateCountingSemaphore( uxCountValue, uxInitialCount );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle );
+                    xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    /* Creation failed - return the reserved slot to the pool. */
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalQueueHandle;
+        }
+
+    #endif /* if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
+
+/* Privileged-only wrapper for xQueueCreateCountingSemaphoreStatic(). Same
+ * pool-slot scheme as the dynamic variant; storage supplied by the caller. */
+        QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
+                                                               const UBaseType_t uxInitialCount,
+                                                               StaticQueue_t * pxStaticQueue ) /* PRIVILEGED_FUNCTION */
+        {
+            QueueHandle_t xInternalQueueHandle = NULL;
+            QueueHandle_t xExternalQueueHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalQueueHandle = xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle );
+                    xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    /* Creation failed - return the reserved slot to the pool. */
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalQueueHandle;
+        }
+
+    #endif /* if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+
+/* Privileged-only wrapper for xQueueGenericCreate(). Reserves a pool slot,
+ * creates the queue, stores the real handle and returns the slot index as
+ * an opaque handle; frees the slot on failure. */
+        QueueHandle_t MPU_xQueueGenericCreate( UBaseType_t uxQueueLength,
+                                               UBaseType_t uxItemSize,
+                                               uint8_t ucQueueType ) /* PRIVILEGED_FUNCTION */
+        {
+            QueueHandle_t xInternalQueueHandle = NULL;
+            QueueHandle_t xExternalQueueHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalQueueHandle = xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle );
+                    xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    /* Creation failed - return the reserved slot to the pool. */
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalQueueHandle;
+        }
+
+    #endif /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+/* Privileged-only wrapper for xQueueGenericCreateStatic(). Same pool-slot
+ * scheme as the dynamic variant; queue storage and control block are
+ * supplied by the caller. */
+        QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
+                                                     const UBaseType_t uxItemSize,
+                                                     uint8_t * pucQueueStorage,
+                                                     StaticQueue_t * pxStaticQueue,
+                                                     const uint8_t ucQueueType ) /* PRIVILEGED_FUNCTION */
+        {
+            QueueHandle_t xInternalQueueHandle = NULL;
+            QueueHandle_t xExternalQueueHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalQueueHandle = xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle );
+                    xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    /* Creation failed - return the reserved slot to the pool. */
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalQueueHandle;
+        }
+
+    #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Privileged-only wrapper for xQueueGenericReset(). Translates the opaque
+ * index-based handle to the real queue handle and forwards. Returns pdFAIL
+ * if the index or handle is invalid. */
+    BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue,
+                                       BaseType_t xNewQueue ) /* PRIVILEGED_FUNCTION */
+    {
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+        BaseType_t xReturn = pdFAIL;
+
+        /* Cast to int32_t, matching every other wrapper in this file: lIndex is
+         * signed, and the previous ( uint32_t ) cast relied on an implicit
+         * unsigned-to-signed conversion on assignment. */
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                xReturn = xQueueGenericReset( xInternalQueueHandle, xNewQueue );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
+
+/* Privileged-only wrapper for xQueueCreateSet(). Reserves a pool slot,
+ * creates the queue set, stores the real handle and returns the slot index
+ * as an opaque handle; frees the slot on failure. */
+        QueueSetHandle_t MPU_xQueueCreateSet( UBaseType_t uxEventQueueLength ) /* PRIVILEGED_FUNCTION */
+        {
+            QueueSetHandle_t xInternalQueueSetHandle = NULL;
+            QueueSetHandle_t xExternalQueueSetHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalQueueSetHandle = xQueueCreateSet( uxEventQueueLength );
+
+                if( xInternalQueueSetHandle != NULL )
+                {
+                    MPU_StoreQueueSetHandleAtIndex( lIndex, xInternalQueueSetHandle );
+                    xExternalQueueSetHandle = ( QueueSetHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    /* Creation failed - return the reserved slot to the pool. */
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalQueueSetHandle;
+        }
+
+    #endif /* if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_QUEUE_SETS == 1 )
+
+/* Privileged-only wrapper for xQueueRemoveFromSet(). Both opaque handles
+ * (set and member) are index validated and translated before forwarding.
+ * Returns pdFAIL if either validation fails. */
+        BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                                            QueueSetHandle_t xQueueSet ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            QueueSetMemberHandle_t xInternalQueueSetMemberHandle = NULL;
+            QueueSetHandle_t xInternalQueueSetHandle = NULL;
+            int32_t lIndexQueueSet, lIndexQueueSetMember;
+
+            lIndexQueueSet = ( int32_t ) xQueueSet;
+            lIndexQueueSetMember = ( int32_t ) xQueueOrSemaphore;
+
+            if( ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE ) &&
+                ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSetMember ) != pdFALSE ) )
+            {
+                xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) );
+                xInternalQueueSetMemberHandle = MPU_GetQueueSetMemberHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSetMember ) );
+
+                if( ( xInternalQueueSetHandle != NULL ) && ( xInternalQueueSetMemberHandle != NULL ) )
+                {
+                    xReturn = xQueueRemoveFromSet( xInternalQueueSetMemberHandle, xInternalQueueSetHandle );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+/* Privileged-only wrapper for xQueueGenericGetStaticBuffers(). Translates
+ * the opaque index-based handle and forwards; the out-pointers receive the
+ * queue's static storage/control-block addresses. Returns pdFALSE if the
+ * index or handle is invalid. */
+        BaseType_t MPU_xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
+                                                      uint8_t ** ppucQueueStorage,
+                                                      StaticQueue_t ** ppxStaticQueue ) /* PRIVILEGED_FUNCTION */
+        {
+            int32_t lIndex;
+            QueueHandle_t xInternalQueueHandle = NULL;
+            BaseType_t xReturn = pdFALSE;
+
+            lIndex = ( int32_t ) xQueue;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    xReturn = xQueueGenericGetStaticBuffers( xInternalQueueHandle, ppucQueueStorage, ppxStaticQueue );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /*if ( configSUPPORT_STATIC_ALLOCATION == 1 )*/
+/*-----------------------------------------------------------*/
+
+/* Privileged-only (ISR) wrapper for xQueueGenericSendFromISR(). Translates
+ * the opaque index-based handle and forwards; no authorization or buffer
+ * check - NOTE(review): presumably ISRs run privileged, confirm. Returns
+ * pdFAIL if the index or handle is invalid. */
+    BaseType_t MPU_xQueueGenericSendFromISR( QueueHandle_t xQueue,
+                                             const void * const pvItemToQueue,
+                                             BaseType_t * const pxHigherPriorityTaskWoken,
+                                             const BaseType_t xCopyPosition ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFAIL;
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                xReturn = xQueueGenericSendFromISR( xInternalQueueHandle, pvItemToQueue, pxHigherPriorityTaskWoken, xCopyPosition );
+            }
+        }
+
+        return xReturn;
+    }
+
+/*-----------------------------------------------------------*/
+
+/* Privileged-only (ISR) wrapper for xQueueGiveFromISR(). Translates the
+ * opaque index-based handle and forwards. Returns pdFAIL if the index or
+ * handle is invalid. */
+    BaseType_t MPU_xQueueGiveFromISR( QueueHandle_t xQueue,
+                                      BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFAIL;
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                xReturn = xQueueGiveFromISR( xInternalQueueHandle, pxHigherPriorityTaskWoken );
+            }
+        }
+
+        return xReturn;
+    }
+
+/*-----------------------------------------------------------*/
+
+/* Privileged-only (ISR) wrapper for xQueuePeekFromISR(). Translates the
+ * opaque index-based handle and forwards. Returns pdFAIL if the index or
+ * handle is invalid. */
+    BaseType_t MPU_xQueuePeekFromISR( QueueHandle_t xQueue,
+                                      void * const pvBuffer ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFAIL;
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                xReturn = xQueuePeekFromISR( xInternalQueueHandle, pvBuffer );
+            }
+        }
+
+        return xReturn;
+    }
+
+/*-----------------------------------------------------------*/
+
+/* Privileged-only (ISR) wrapper for xQueueReceiveFromISR(). Translates the
+ * opaque index-based handle and forwards. Returns pdFAIL if the index or
+ * handle is invalid. */
+    BaseType_t MPU_xQueueReceiveFromISR( QueueHandle_t xQueue,
+                                         void * const pvBuffer,
+                                         BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFAIL;
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                xReturn = xQueueReceiveFromISR( xInternalQueueHandle, pvBuffer, pxHigherPriorityTaskWoken );
+            }
+        }
+
+        return xReturn;
+    }
+
+/*-----------------------------------------------------------*/
+
+/* Privileged-only (ISR) wrapper for xQueueIsQueueEmptyFromISR(). Translates
+ * the opaque index-based handle and forwards. Returns pdFAIL if the index
+ * or handle is invalid. */
+    BaseType_t MPU_xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFAIL;
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                xReturn = xQueueIsQueueEmptyFromISR( xInternalQueueHandle );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+/* Privileged-only (ISR) wrapper for xQueueIsQueueFullFromISR(). Translates
+ * the opaque index-based handle and forwards. Returns pdFAIL if the index
+ * or handle is invalid. */
+    BaseType_t MPU_xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFAIL;
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                xReturn = xQueueIsQueueFullFromISR( xInternalQueueHandle );
+            }
+        }
+
+        return xReturn;
+    }
+
+/*-----------------------------------------------------------*/
+
+/* Privileged-only (ISR) wrapper for uxQueueMessagesWaitingFromISR().
+ * Translates the opaque index-based handle and forwards. Returns 0 if the
+ * index or handle is invalid. */
+    UBaseType_t MPU_uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+    {
+        UBaseType_t uxReturn = 0;
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                uxReturn = uxQueueMessagesWaitingFromISR( xInternalQueueHandle );
+            }
+        }
+
+        return uxReturn;
+    }
+
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+        /* MPU wrapper for xQueueGetMutexHolderFromISR.  Translates the opaque
+         * external semaphore handle (pool index) to the internal handle,
+         * queries the mutex holder, then converts the holder's internal task
+         * handle back to an opaque external handle for the caller.  Returns
+         * NULL if any translation fails or the mutex is not held. */
+        TaskHandle_t MPU_xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) /* PRIVILEGED_FUNCTION */
+        {
+            TaskHandle_t xMutexHolderTaskInternalHandle = NULL;
+            TaskHandle_t xMutexHolderTaskExternalHandle = NULL;
+            int32_t lIndex, lMutexHolderTaskIndex;
+            QueueHandle_t xInternalSemaphoreHandle = NULL;
+
+            lIndex = ( int32_t ) xSemaphore;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalSemaphoreHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalSemaphoreHandle != NULL )
+                {
+                    /* Use the ISR-safe variant here.  The task-level
+                     * xQueueGetMutexHolder() takes a task-level critical
+                     * section and must not be called from interrupt
+                     * context. */
+                    xMutexHolderTaskInternalHandle = xQueueGetMutexHolderFromISR( xInternalSemaphoreHandle );
+
+                    if( xMutexHolderTaskInternalHandle != NULL )
+                    {
+                        lMutexHolderTaskIndex = MPU_GetIndexForTaskHandle( xMutexHolderTaskInternalHandle );
+
+                        if( lMutexHolderTaskIndex != -1 )
+                        {
+                            xMutexHolderTaskExternalHandle = ( TaskHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lMutexHolderTaskIndex ) );
+                        }
+                    }
+                }
+            }
+
+            return xMutexHolderTaskExternalHandle;
+        }
+
+    #endif /* #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_QUEUE_SETS == 1 )
+
+        /* MPU wrapper for xQueueSelectFromSetFromISR.  Translates the external
+         * queue-set handle (pool index) to the internal one, performs the
+         * select, then converts the selected member's internal handle back to
+         * an opaque external handle.  Returns NULL if no member is ready or
+         * any translation fails. */
+        QueueSetMemberHandle_t MPU_xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) /* PRIVILEGED_FUNCTION */
+        {
+            QueueSetHandle_t xInternalQueueSetHandle = NULL;
+            QueueSetMemberHandle_t xSelectedMemberInternal = NULL;
+            QueueSetMemberHandle_t xSelectedMemberExternal = NULL;
+            int32_t lIndexQueueSet, lIndexSelectedMember;
+
+            lIndexQueueSet = ( int32_t ) xQueueSet;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE )
+            {
+                xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) );
+
+                if( xInternalQueueSetHandle != NULL )
+                {
+                    xSelectedMemberInternal = xQueueSelectFromSetFromISR( xInternalQueueSetHandle );
+
+                    if( xSelectedMemberInternal != NULL )
+                    {
+                        /* Map the internal member handle back to its pool
+                         * index so the caller only ever sees opaque
+                         * handles. */
+                        lIndexSelectedMember = MPU_GetIndexForQueueSetMemberHandle( xSelectedMemberInternal );
+
+                        if( lIndexSelectedMember != -1 )
+                        {
+                            xSelectedMemberExternal = ( QueueSetMemberHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lIndexSelectedMember ) );
+                        }
+                    }
+                }
+            }
+
+            return xSelectedMemberExternal;
+        }
+
+    #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/*-----------------------------------------------------------*/
+/* MPU wrappers for timers APIs. */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        void * MPU_pvTimerGetTimerIDImpl( const TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for pvTimerGetTimerID.  Validates the external handle,
+         * checks that the calling task is authorized to access this timer
+         * object, then forwards to the real API with the internal handle.
+         * Returns NULL if any check fails. */
+        void * MPU_pvTimerGetTimerIDImpl( const TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+        {
+            void * pvReturn = NULL;
+            TimerHandle_t xInternalTimerHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xCallingTaskIsAuthorizedToAccessTimer = pdFALSE;
+
+            lIndex = ( int32_t ) xTimer;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessTimer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessTimer == pdTRUE )
+                {
+                    xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTimerHandle != NULL )
+                    {
+                        pvReturn = pvTimerGetTimerID( xInternalTimerHandle );
+                    }
+                }
+            }
+
+            return pvReturn;
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        void MPU_vTimerSetTimerIDImpl( TimerHandle_t xTimer,
+                                       void * pvNewID ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for vTimerSetTimerID.  Validates the external handle and
+         * the calling task's access rights to the timer object before setting
+         * the ID; silently does nothing if any check fails. */
+        void MPU_vTimerSetTimerIDImpl( TimerHandle_t xTimer,
+                                       void * pvNewID ) /* PRIVILEGED_FUNCTION */
+        {
+            TimerHandle_t xInternalTimerHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xCallingTaskIsAuthorizedToAccessTimer = pdFALSE;
+
+            lIndex = ( int32_t ) xTimer;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessTimer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessTimer == pdTRUE )
+                {
+                    xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTimerHandle != NULL )
+                    {
+                        vTimerSetTimerID( xInternalTimerHandle, pvNewID );
+                    }
+                }
+            }
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        BaseType_t MPU_xTimerIsTimerActiveImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xTimerIsTimerActive.  Validates the external handle
+         * and the calling task's access rights before querying the timer.
+         * Returns pdFALSE if any check fails. */
+        BaseType_t MPU_xTimerIsTimerActiveImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFALSE;
+            TimerHandle_t xInternalTimerHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xCallingTaskIsAuthorizedToAccessTimer = pdFALSE;
+
+            lIndex = ( int32_t ) xTimer;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessTimer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessTimer == pdTRUE )
+                {
+                    xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTimerHandle != NULL )
+                    {
+                        xReturn = xTimerIsTimerActive( xInternalTimerHandle );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandleImpl( void ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xTimerGetTimerDaemonTaskHandle.  NOTE(review): the
+         * internal daemon task handle is returned as-is, with no conversion
+         * to an external pool index as the other wrappers perform — confirm
+         * this is intentional. */
+        TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandleImpl( void ) /* PRIVILEGED_FUNCTION */
+        {
+            TaskHandle_t xReturn;
+
+            xReturn = xTimerGetTimerDaemonTaskHandle();
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        /* Packs the xTimerGenericCommand arguments into a single parameter
+         * struct and dispatches through MPU_xTimerGenericCommandEntry, which
+         * routes to the Impl (unprivileged caller) or PrivImpl (privileged
+         * caller) variant below. */
+        BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                             const BaseType_t xCommandID,
+                                             const TickType_t xOptionalValue,
+                                             BaseType_t * const pxHigherPriorityTaskWoken,
+                                             const TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+        {
+            BaseType_t xReturn = pdFALSE;
+            xTimerGenericCommandParams_t xParams;
+
+            xParams.xTimer = xTimer;
+            xParams.xCommandID = xCommandID;
+            xParams.xOptionalValue = xOptionalValue;
+            xParams.pxHigherPriorityTaskWoken = pxHigherPriorityTaskWoken;
+            xParams.xTicksToWait = xTicksToWait;
+
+            xReturn = MPU_xTimerGenericCommandEntry( &( xParams ) );
+
+            return xReturn;
+        }
+
+        BaseType_t MPU_xTimerGenericCommandImpl( const xTimerGenericCommandParams_t * pxParams ) PRIVILEGED_FUNCTION;
+
+        /* Unprivileged-caller implementation.  Verifies that the parameter
+         * struct is readable by the caller, that pxHigherPriorityTaskWoken
+         * (if supplied) is writable, and that the caller is authorized to
+         * access the timer object, before forwarding the command with the
+         * translated internal handle.  Returns pdFALSE if any check fails. */
+        BaseType_t MPU_xTimerGenericCommandImpl( const xTimerGenericCommandParams_t * pxParams ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFALSE;
+            TimerHandle_t xInternalTimerHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xIsHigherPriorityTaskWokenWriteable = pdFALSE;
+            BaseType_t xCallingTaskIsAuthorizedToAccessTimer = pdFALSE;
+            BaseType_t xAreParamsReadable = pdFALSE;
+
+            if( pxParams != NULL )
+            {
+                xAreParamsReadable = xPortIsAuthorizedToAccessBuffer( pxParams,
+                                                                     sizeof( xTimerGenericCommandParams_t ),
+                                                                     tskMPU_READ_PERMISSION );
+            }
+
+            if( xAreParamsReadable == pdTRUE )
+            {
+                if( pxParams->pxHigherPriorityTaskWoken != NULL )
+                {
+                    xIsHigherPriorityTaskWokenWriteable = xPortIsAuthorizedToAccessBuffer( pxParams->pxHigherPriorityTaskWoken,
+                                                                                          sizeof( BaseType_t ),
+                                                                                          tskMPU_WRITE_PERMISSION );
+                }
+
+                if( ( pxParams->pxHigherPriorityTaskWoken == NULL ) ||
+                    ( xIsHigherPriorityTaskWokenWriteable == pdTRUE ) )
+                {
+                    lIndex = ( int32_t ) ( pxParams->xTimer );
+
+                    if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                    {
+                        xCallingTaskIsAuthorizedToAccessTimer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                        if( xCallingTaskIsAuthorizedToAccessTimer == pdTRUE )
+                        {
+                            xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                            if( xInternalTimerHandle != NULL )
+                            {
+                                xReturn = xTimerGenericCommand( xInternalTimerHandle,
+                                                                pxParams->xCommandID,
+                                                                pxParams->xOptionalValue,
+                                                                pxParams->pxHigherPriorityTaskWoken,
+                                                                pxParams->xTicksToWait );
+                            }
+                        }
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+        BaseType_t MPU_xTimerGenericCommandPrivImpl( const xTimerGenericCommandParams_t * pxParams ) PRIVILEGED_FUNCTION;
+
+        /* Privileged-caller implementation.  Only the handle's pool index is
+         * validated; the buffer-access and object-access checks performed in
+         * the Impl variant are skipped for privileged callers. */
+        BaseType_t MPU_xTimerGenericCommandPrivImpl( const xTimerGenericCommandParams_t * pxParams ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFALSE;
+            TimerHandle_t xInternalTimerHandle = NULL;
+            int32_t lIndex;
+
+            if( pxParams != NULL )
+            {
+                lIndex = ( int32_t ) ( pxParams->xTimer );
+
+                if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                {
+                    xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTimerHandle != NULL )
+                    {
+                        xReturn = xTimerGenericCommand( xInternalTimerHandle,
+                                                        pxParams->xCommandID,
+                                                        pxParams->xOptionalValue,
+                                                        pxParams->pxHigherPriorityTaskWoken,
+                                                        pxParams->xTicksToWait );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        const char * MPU_pcTimerGetNameImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for pcTimerGetName.  Validates the external handle and
+         * the calling task's access rights before querying the timer's name.
+         * Returns NULL if any check fails. */
+        const char * MPU_pcTimerGetNameImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+        {
+            const char * pcReturn = NULL;
+            TimerHandle_t xInternalTimerHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xCallingTaskIsAuthorizedToAccessTimer = pdFALSE;
+
+            lIndex = ( int32_t ) xTimer;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessTimer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessTimer == pdTRUE )
+                {
+                    xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTimerHandle != NULL )
+                    {
+                        pcReturn = pcTimerGetName( xInternalTimerHandle );
+                    }
+                }
+            }
+
+            return pcReturn;
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        void MPU_vTimerSetReloadModeImpl( TimerHandle_t xTimer,
+                                          const UBaseType_t uxAutoReload ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for vTimerSetReloadMode.  Validates the external handle
+         * and the calling task's access rights before updating the reload
+         * mode; silently does nothing if any check fails. */
+        void MPU_vTimerSetReloadModeImpl( TimerHandle_t xTimer,
+                                          const UBaseType_t uxAutoReload ) /* PRIVILEGED_FUNCTION */
+        {
+            TimerHandle_t xInternalTimerHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xCallingTaskIsAuthorizedToAccessTimer = pdFALSE;
+
+            lIndex = ( int32_t ) xTimer;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessTimer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessTimer == pdTRUE )
+                {
+                    xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTimerHandle != NULL )
+                    {
+                        vTimerSetReloadMode( xInternalTimerHandle, uxAutoReload );
+                    }
+                }
+            }
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        BaseType_t MPU_xTimerGetReloadModeImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xTimerGetReloadMode.  Validates the external handle
+         * and the calling task's access rights before querying the reload
+         * mode.  Returns pdFALSE if any check fails. */
+        BaseType_t MPU_xTimerGetReloadModeImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFALSE;
+            TimerHandle_t xInternalTimerHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xCallingTaskIsAuthorizedToAccessTimer = pdFALSE;
+
+            lIndex = ( int32_t ) xTimer;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessTimer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessTimer == pdTRUE )
+                {
+                    xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTimerHandle != NULL )
+                    {
+                        xReturn = xTimerGetReloadMode( xInternalTimerHandle );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        UBaseType_t MPU_uxTimerGetReloadModeImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for uxTimerGetReloadMode.  Validates the external handle
+         * and the calling task's access rights before querying the reload
+         * mode.  Returns 0 if any check fails. */
+        UBaseType_t MPU_uxTimerGetReloadModeImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+        {
+            UBaseType_t uxReturn = 0;
+            TimerHandle_t xInternalTimerHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xCallingTaskIsAuthorizedToAccessTimer = pdFALSE;
+
+            lIndex = ( int32_t ) xTimer;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessTimer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessTimer == pdTRUE )
+                {
+                    xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTimerHandle != NULL )
+                    {
+                        uxReturn = uxTimerGetReloadMode( xInternalTimerHandle );
+                    }
+                }
+            }
+
+            return uxReturn;
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        TickType_t MPU_xTimerGetPeriodImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xTimerGetPeriod.  Validates the external handle and
+         * the calling task's access rights before querying the period.
+         * Returns 0 if any check fails. */
+        TickType_t MPU_xTimerGetPeriodImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+        {
+            TickType_t xReturn = 0;
+            TimerHandle_t xInternalTimerHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xCallingTaskIsAuthorizedToAccessTimer = pdFALSE;
+
+            lIndex = ( int32_t ) xTimer;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessTimer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessTimer == pdTRUE )
+                {
+                    xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTimerHandle != NULL )
+                    {
+                        xReturn = xTimerGetPeriod( xInternalTimerHandle );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        TickType_t MPU_xTimerGetExpiryTimeImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xTimerGetExpiryTime.  Validates the external handle
+         * and the calling task's access rights before querying the expiry
+         * time.  Returns 0 if any check fails. */
+        TickType_t MPU_xTimerGetExpiryTimeImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+        {
+            TickType_t xReturn = 0;
+            TimerHandle_t xInternalTimerHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xCallingTaskIsAuthorizedToAccessTimer = pdFALSE;
+
+            lIndex = ( int32_t ) xTimer;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessTimer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessTimer == pdTRUE )
+                {
+                    xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTimerHandle != NULL )
+                    {
+                        xReturn = xTimerGetExpiryTime( xInternalTimerHandle );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Privileged only wrappers for Timer APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 )
+
+        /* Privileged-only wrapper for xTimerCreate.  A pool slot is reserved
+         * before the timer is created so a valid external index exists for the
+         * new object; the slot is released again if creation fails.  The real
+         * timer is created with MPU_TimerCallback as its callback and the
+         * user's callback is stored alongside the handle.  Returns the opaque
+         * external handle, or NULL on failure. */
+        TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName,
+                                        const TickType_t xTimerPeriodInTicks,
+                                        const UBaseType_t uxAutoReload,
+                                        void * const pvTimerID,
+                                        TimerCallbackFunction_t pxCallbackFunction ) /* PRIVILEGED_FUNCTION */
+        {
+            TimerHandle_t xInternalTimerHandle = NULL;
+            TimerHandle_t xExternalTimerHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalTimerHandle = xTimerCreate( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, MPU_TimerCallback );
+
+                if( xInternalTimerHandle != NULL )
+                {
+                    MPU_StoreTimerHandleAtIndex( lIndex, xInternalTimerHandle, pxCallbackFunction );
+                    xExternalTimerHandle = ( TimerHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalTimerHandle;
+        }
+
+    #endif /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 )
+
+        /* Privileged-only wrapper for xTimerCreateStatic.  Same pool-slot
+         * reserve/release pattern as MPU_xTimerCreate, but the timer's storage
+         * is supplied by the caller in pxTimerBuffer.  Returns the opaque
+         * external handle, or NULL on failure. */
+        TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName,
+                                              const TickType_t xTimerPeriodInTicks,
+                                              const UBaseType_t uxAutoReload,
+                                              void * const pvTimerID,
+                                              TimerCallbackFunction_t pxCallbackFunction,
+                                              StaticTimer_t * pxTimerBuffer ) /* PRIVILEGED_FUNCTION */
+        {
+            TimerHandle_t xInternalTimerHandle = NULL;
+            TimerHandle_t xExternalTimerHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalTimerHandle = xTimerCreateStatic( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, MPU_TimerCallback, pxTimerBuffer );
+
+                if( xInternalTimerHandle != NULL )
+                {
+                    MPU_StoreTimerHandleAtIndex( lIndex, xInternalTimerHandle, pxCallbackFunction );
+                    xExternalTimerHandle = ( TimerHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalTimerHandle;
+        }
+
+    #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 )
+
+        /* Privileged-only wrapper for xTimerGetStaticBuffer.  Validates the
+         * external handle (pool index) and forwards with the internal handle.
+         * Returns pdFALSE if the handle is invalid or unregistered. */
+        BaseType_t MPU_xTimerGetStaticBuffer( TimerHandle_t xTimer,
+                                              StaticTimer_t ** ppxTimerBuffer ) /* PRIVILEGED_FUNCTION */
+        {
+            TimerHandle_t xInternalTimerHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xReturn = pdFALSE;
+
+            lIndex = ( int32_t ) xTimer;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalTimerHandle != NULL )
+                {
+                    xReturn = xTimerGetStaticBuffer( xInternalTimerHandle, ppxTimerBuffer );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/*-----------------------------------------------------------*/
+/* MPU wrappers for event group APIs. */
+/*-----------------------------------------------------------*/
+
+    /* Packs the xEventGroupWaitBits arguments into a single parameter struct
+     * and dispatches through MPU_xEventGroupWaitBitsEntry to the Impl
+     * function below. */
+    EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                         const EventBits_t uxBitsToWaitFor,
+                                         const BaseType_t xClearOnExit,
+                                         const BaseType_t xWaitForAllBits,
+                                         TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+    {
+        EventBits_t xReturn = 0;
+        xEventGroupWaitBitsParams_t xParams;
+
+        xParams.xEventGroup = xEventGroup;
+        xParams.uxBitsToWaitFor = uxBitsToWaitFor;
+        xParams.xClearOnExit = xClearOnExit;
+        xParams.xWaitForAllBits = xWaitForAllBits;
+        xParams.xTicksToWait = xTicksToWait;
+
+        xReturn = MPU_xEventGroupWaitBitsEntry( &( xParams ) );
+
+        return xReturn;
+    }
+
+    EventBits_t MPU_xEventGroupWaitBitsImpl( const xEventGroupWaitBitsParams_t * pxParams ) PRIVILEGED_FUNCTION;
+
+    /* MPU wrapper for xEventGroupWaitBits.  Verifies the parameter struct is
+     * readable by the caller, rejects wait masks that are zero or touch the
+     * reserved control bits, refuses a blocking wait while the scheduler is
+     * suspended, and checks the caller's access rights to the event group
+     * before forwarding with the translated internal handle.  Returns 0 if
+     * any check fails. */
+    EventBits_t MPU_xEventGroupWaitBitsImpl( const xEventGroupWaitBitsParams_t * pxParams ) /* PRIVILEGED_FUNCTION */
+    {
+        EventBits_t xReturn = 0;
+        EventGroupHandle_t xInternalEventGroupHandle = NULL;
+        int32_t lIndex;
+        BaseType_t xCallingTaskIsAuthorizedToAccessEventGroup = pdFALSE;
+        BaseType_t xAreParamsReadable = pdFALSE;
+
+        if( pxParams != NULL )
+        {
+            xAreParamsReadable = xPortIsAuthorizedToAccessBuffer( pxParams,
+                                                                 sizeof( xEventGroupWaitBitsParams_t ),
+                                                                 tskMPU_READ_PERMISSION );
+        }
+
+        if( xAreParamsReadable == pdTRUE )
+        {
+            if( ( ( pxParams->uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 ) &&
+                ( pxParams->uxBitsToWaitFor != 0 )
+                #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
+                    && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( pxParams->xTicksToWait != 0 ) ) )
+                #endif
+                )
+            {
+                lIndex = ( int32_t ) ( pxParams->xEventGroup );
+
+                if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                {
+                    xCallingTaskIsAuthorizedToAccessEventGroup = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xCallingTaskIsAuthorizedToAccessEventGroup == pdTRUE )
+                    {
+                        xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                        if( xInternalEventGroupHandle != NULL )
+                        {
+                            xReturn = xEventGroupWaitBits( xInternalEventGroupHandle,
+                                                           pxParams->uxBitsToWaitFor,
+                                                           pxParams->xClearOnExit,
+                                                           pxParams->xWaitForAllBits,
+                                                           pxParams->xTicksToWait );
+                        }
+                    }
+                }
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    EventBits_t MPU_xEventGroupClearBitsImpl( EventGroupHandle_t xEventGroup,
+                                              const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION;
+
+    /* MPU wrapper for xEventGroupClearBits.  Rejects masks touching the
+     * reserved control bits, then validates the external handle and the
+     * caller's access rights before clearing.  Returns 0 if any check
+     * fails. */
+    EventBits_t MPU_xEventGroupClearBitsImpl( EventGroupHandle_t xEventGroup,
+                                              const EventBits_t uxBitsToClear ) /* PRIVILEGED_FUNCTION */
+    {
+        EventBits_t xReturn = 0;
+        EventGroupHandle_t xInternalEventGroupHandle = NULL;
+        int32_t lIndex;
+        BaseType_t xCallingTaskIsAuthorizedToAccessEventGroup = pdFALSE;
+
+        if( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 )
+        {
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessEventGroup = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessEventGroup == pdTRUE )
+                {
+                    xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalEventGroupHandle != NULL )
+                    {
+                        xReturn = xEventGroupClearBits( xInternalEventGroupHandle, uxBitsToClear );
+                    }
+                }
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    EventBits_t MPU_xEventGroupSetBitsImpl( EventGroupHandle_t xEventGroup,
+                                            const EventBits_t uxBitsToSet ) PRIVILEGED_FUNCTION;
+
+    /* MPU wrapper for xEventGroupSetBits.  Rejects masks touching the
+     * reserved control bits, then validates the external handle and the
+     * caller's access rights before setting.  Returns 0 if any check
+     * fails. */
+    EventBits_t MPU_xEventGroupSetBitsImpl( EventGroupHandle_t xEventGroup,
+                                            const EventBits_t uxBitsToSet ) /* PRIVILEGED_FUNCTION */
+    {
+        EventBits_t xReturn = 0;
+        EventGroupHandle_t xInternalEventGroupHandle = NULL;
+        int32_t lIndex;
+        BaseType_t xCallingTaskIsAuthorizedToAccessEventGroup = pdFALSE;
+
+        if( ( uxBitsToSet & eventEVENT_BITS_CONTROL_BYTES ) == 0 )
+        {
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessEventGroup = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessEventGroup == pdTRUE )
+                {
+                    xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalEventGroupHandle != NULL )
+                    {
+                        xReturn = xEventGroupSetBits( xInternalEventGroupHandle, uxBitsToSet );
+                    }
+                }
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    EventBits_t MPU_xEventGroupSyncImpl( EventGroupHandle_t xEventGroup,
+                                         const EventBits_t uxBitsToSet,
+                                         const EventBits_t uxBitsToWaitFor,
+                                         TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+    /* MPU wrapper for xEventGroupSync.  Rejects wait masks that are zero or
+     * touch the reserved control bits, refuses a blocking wait while the
+     * scheduler is suspended, and checks the caller's access rights to the
+     * event group before forwarding with the translated internal handle.
+     * Returns 0 if any check fails. */
+    EventBits_t MPU_xEventGroupSyncImpl( EventGroupHandle_t xEventGroup,
+                                         const EventBits_t uxBitsToSet,
+                                         const EventBits_t uxBitsToWaitFor,
+                                         TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        EventBits_t xReturn = 0;
+        EventGroupHandle_t xInternalEventGroupHandle = NULL;
+        int32_t lIndex;
+        BaseType_t xCallingTaskIsAuthorizedToAccessEventGroup = pdFALSE;
+
+        if( ( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 ) &&
+            ( uxBitsToWaitFor != 0 )
+            #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
+                && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) )
+            #endif
+            )
+        {
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessEventGroup = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessEventGroup == pdTRUE )
+                {
+                    xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalEventGroupHandle != NULL )
+                    {
+                        xReturn = xEventGroupSync( xInternalEventGroupHandle, uxBitsToSet, uxBitsToWaitFor, xTicksToWait );
+                    }
+                }
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TRACE_FACILITY == 1 )
+
+        UBaseType_t MPU_uxEventGroupGetNumberImpl( void * xEventGroup ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for uxEventGroupGetNumber (trace facility).  Validates
+         * the external handle and the caller's access rights before querying
+         * the event group number.  Returns 0 if any check fails. */
+        UBaseType_t MPU_uxEventGroupGetNumberImpl( void * xEventGroup ) /* PRIVILEGED_FUNCTION */
+        {
+            UBaseType_t xReturn = 0;
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xCallingTaskIsAuthorizedToAccessEventGroup = pdFALSE;
+
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessEventGroup = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessEventGroup == pdTRUE )
+                {
+                    xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalEventGroupHandle != NULL )
+                    {
+                        xReturn = uxEventGroupGetNumber( xInternalEventGroupHandle );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TRACE_FACILITY == 1 )
+
+        void MPU_vEventGroupSetNumberImpl( void * xEventGroup,
+                                           UBaseType_t uxEventGroupNumber ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for vEventGroupSetNumber (trace facility).  Validates
+         * the external handle and the caller's access rights before setting
+         * the event group number; silently does nothing if any check fails. */
+        void MPU_vEventGroupSetNumberImpl( void * xEventGroup,
+                                           UBaseType_t uxEventGroupNumber ) /* PRIVILEGED_FUNCTION */
+        {
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xCallingTaskIsAuthorizedToAccessEventGroup = pdFALSE;
+
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xCallingTaskIsAuthorizedToAccessEventGroup = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xCallingTaskIsAuthorizedToAccessEventGroup == pdTRUE )
+                {
+                    xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalEventGroupHandle != NULL )
+                    {
+                        vEventGroupSetNumber( xInternalEventGroupHandle, uxEventGroupNumber );
+                    }
+                }
+            }
+        }
+
+    #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+/* Privileged only wrappers for Event Group APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xEventGroupCreate.  Reserves a pool slot
+         * before creating the event group and releases it again if creation
+         * fails.  Returns the opaque external handle, or NULL on failure. */
+        EventGroupHandle_t MPU_xEventGroupCreate( void ) /* PRIVILEGED_FUNCTION */
+        {
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            EventGroupHandle_t xExternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalEventGroupHandle = xEventGroupCreate();
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    MPU_StoreEventGroupHandleAtIndex( lIndex, xInternalEventGroupHandle );
+                    xExternalEventGroupHandle = ( EventGroupHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalEventGroupHandle;
+        }
+
+    #endif /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xEventGroupCreateStatic.  Same pool-slot
+         * reserve/release pattern as MPU_xEventGroupCreate, with the storage
+         * supplied by the caller.  Returns the opaque external handle, or
+         * NULL on failure. */
+        EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) /* PRIVILEGED_FUNCTION */
+        {
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            EventGroupHandle_t xExternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalEventGroupHandle = xEventGroupCreateStatic( pxEventGroupBuffer );
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    MPU_StoreEventGroupHandleAtIndex( lIndex, xInternalEventGroupHandle );
+                    xExternalEventGroupHandle = ( EventGroupHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalEventGroupHandle;
+        }
+
+    #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+    /* Privileged-only wrapper for vEventGroupDelete.  Deletes the underlying
+     * event group and releases its kernel object pool slot so the external
+     * index can be reused. */
+    void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) /* PRIVILEGED_FUNCTION */
+    {
+        EventGroupHandle_t xInternalEventGroupHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xEventGroup;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalEventGroupHandle != NULL )
+            {
+                vEventGroupDelete( xInternalEventGroupHandle );
+                MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+            }
+        }
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xEventGroupGetStaticBuffer.  Validates
+         * the external handle (pool index) and forwards with the internal
+         * handle.  Returns pdFALSE if the handle is invalid or
+         * unregistered. */
+        BaseType_t MPU_xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup,
+                                                   StaticEventGroup_t ** ppxEventGroupBuffer ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFALSE;
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    xReturn = xEventGroupGetStaticBuffer( xInternalEventGroupHandle, ppxEventGroupBuffer );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )
+
+        /* MPU wrapper for xEventGroupClearBitsFromISR.  Validates the external
+         * handle (pool index) and forwards with the internal handle.  Returns
+         * pdFALSE if the handle is invalid or unregistered. */
+        BaseType_t MPU_xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup,
+                                                    const EventBits_t uxBitsToClear ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFALSE;
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    xReturn = xEventGroupClearBitsFromISR( xInternalEventGroupHandle, uxBitsToClear );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )
+
+        /* MPU wrapper for xEventGroupSetBitsFromISR.  Validates the external
+         * handle (pool index) and forwards with the internal handle.  Returns
+         * pdFALSE if the handle is invalid or unregistered. */
+        BaseType_t MPU_xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup,
+                                                  const EventBits_t uxBitsToSet,
+                                                  BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFALSE;
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    xReturn = xEventGroupSetBitsFromISR( xInternalEventGroupHandle, uxBitsToSet, pxHigherPriorityTaskWoken );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) /* PRIVILEGED_FUNCTION */
+ { /* ISR-safe read of the current event bits via the opaque external handle. */
+ EventBits_t xReturn = 0; /* 0 is returned for an invalid or unknown handle. */
+ EventGroupHandle_t xInternalEventGroupHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xEventGroup;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalEventGroupHandle != NULL )
+ {
+ xReturn = xEventGroupGetBitsFromISR( xInternalEventGroupHandle );
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+/*-----------------------------------------------------------*/
+/* MPU wrappers for stream buffer APIs. */
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSendImpl( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; /* Prototype for the uxSystemCallImplementations[] table entry. */
+
+ size_t MPU_xStreamBufferSendImpl( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+ { /* System-call target: validate caller's buffer and object permissions before sending. */
+ size_t xReturn = 0; /* 0 bytes sent on any validation failure. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xIsTxDataBufferReadable = pdFALSE;
+ BaseType_t xCallingTaskIsAuthorizedToAccessStreamBuffer = pdFALSE;
+
+ if( pvTxData != NULL )
+ {
+ xIsTxDataBufferReadable = xPortIsAuthorizedToAccessBuffer( pvTxData,
+ xDataLengthBytes,
+ tskMPU_READ_PERMISSION ); /* Caller must be able to read the whole Tx buffer. */
+
+ if( xIsTxDataBufferReadable == pdTRUE )
+ {
+ lIndex = ( int32_t ) xStreamBuffer; /* External handle is an opaque pool index. */
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessStreamBuffer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); /* Caller must also be granted access to this kernel object. */
+
+ if( xCallingTaskIsAuthorizedToAccessStreamBuffer == pdTRUE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferSend( xInternalStreamBufferHandle, pvTxData, xDataLengthBytes, xTicksToWait );
+ }
+ }
+ }
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferReceiveImpl( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; /* Prototype for the uxSystemCallImplementations[] table entry. */
+
+ size_t MPU_xStreamBufferReceiveImpl( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+ { /* System-call target: validate caller's buffer and object permissions before receiving. */
+ size_t xReturn = 0; /* 0 bytes received on any validation failure. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xIsRxDataBufferWriteable = pdFALSE;
+ BaseType_t xCallingTaskIsAuthorizedToAccessStreamBuffer = pdFALSE;
+
+ if( pvRxData != NULL )
+ {
+ xIsRxDataBufferWriteable = xPortIsAuthorizedToAccessBuffer( pvRxData,
+ xBufferLengthBytes,
+ tskMPU_WRITE_PERMISSION ); /* Caller must be able to write the whole Rx buffer. */
+
+ if( xIsRxDataBufferWriteable == pdTRUE )
+ {
+ lIndex = ( int32_t ) xStreamBuffer; /* External handle is an opaque pool index. */
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessStreamBuffer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); /* Caller must also be granted access to this kernel object. */
+
+ if( xCallingTaskIsAuthorizedToAccessStreamBuffer == pdTRUE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferReceive( xInternalStreamBufferHandle, pvRxData, xBufferLengthBytes, xTicksToWait );
+ }
+ }
+ }
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsFullImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; /* Prototype for the uxSystemCallImplementations[] table entry. */
+
+ BaseType_t MPU_xStreamBufferIsFullImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+ { /* System-call target: handle/permission-checked xStreamBufferIsFull(). */
+ BaseType_t xReturn = pdFALSE; /* pdFALSE also doubles as the failure value for a bad handle. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xCallingTaskIsAuthorizedToAccessStreamBuffer = pdFALSE;
+
+ lIndex = ( int32_t ) xStreamBuffer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessStreamBuffer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); /* Access-control check against the calling task's permissions. */
+
+ if( xCallingTaskIsAuthorizedToAccessStreamBuffer == pdTRUE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferIsFull( xInternalStreamBufferHandle );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsEmptyImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; /* Prototype for the uxSystemCallImplementations[] table entry. */
+
+ BaseType_t MPU_xStreamBufferIsEmptyImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+ { /* System-call target: handle/permission-checked xStreamBufferIsEmpty(). */
+ BaseType_t xReturn = pdFALSE; /* NOTE(review): pdFALSE ("not empty") is also the bad-handle result — callers cannot distinguish. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xCallingTaskIsAuthorizedToAccessStreamBuffer = pdFALSE;
+
+ lIndex = ( int32_t ) xStreamBuffer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessStreamBuffer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); /* Access-control check against the calling task's permissions. */
+
+ if( xCallingTaskIsAuthorizedToAccessStreamBuffer == pdTRUE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferIsEmpty( xInternalStreamBufferHandle );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSpacesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; /* Prototype for the uxSystemCallImplementations[] table entry. */
+
+ size_t MPU_xStreamBufferSpacesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+ { /* System-call target: handle/permission-checked xStreamBufferSpacesAvailable(). */
+ size_t xReturn = 0; /* 0 on validation failure. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xCallingTaskIsAuthorizedToAccessStreamBuffer = pdFALSE;
+
+ lIndex = ( int32_t ) xStreamBuffer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessStreamBuffer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); /* Access-control check against the calling task's permissions. */
+
+ if( xCallingTaskIsAuthorizedToAccessStreamBuffer == pdTRUE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferSpacesAvailable( xInternalStreamBufferHandle );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferBytesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; /* Prototype for the uxSystemCallImplementations[] table entry. */
+
+ size_t MPU_xStreamBufferBytesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+ { /* System-call target: handle/permission-checked xStreamBufferBytesAvailable(). */
+ size_t xReturn = 0; /* 0 on validation failure. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xCallingTaskIsAuthorizedToAccessStreamBuffer = pdFALSE;
+
+ lIndex = ( int32_t ) xStreamBuffer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessStreamBuffer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); /* Access-control check against the calling task's permissions. */
+
+ if( xCallingTaskIsAuthorizedToAccessStreamBuffer == pdTRUE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferBytesAvailable( xInternalStreamBufferHandle );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevelImpl( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) PRIVILEGED_FUNCTION; /* Prototype for the uxSystemCallImplementations[] table entry. */
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevelImpl( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* PRIVILEGED_FUNCTION */
+ { /* System-call target: handle/permission-checked xStreamBufferSetTriggerLevel(). */
+ BaseType_t xReturn = pdFALSE; /* pdFALSE on validation failure. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xCallingTaskIsAuthorizedToAccessStreamBuffer = pdFALSE;
+
+ lIndex = ( int32_t ) xStreamBuffer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessStreamBuffer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); /* Access-control check against the calling task's permissions. */
+
+ if( xCallingTaskIsAuthorizedToAccessStreamBuffer == pdTRUE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferSetTriggerLevel( xInternalStreamBufferHandle, xTriggerLevel );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferNextMessageLengthBytesImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; /* Prototype for the uxSystemCallImplementations[] table entry. */
+
+ size_t MPU_xStreamBufferNextMessageLengthBytesImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+ { /* System-call target: handle/permission-checked xStreamBufferNextMessageLengthBytes(). */
+ size_t xReturn = 0; /* 0 on validation failure. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xCallingTaskIsAuthorizedToAccessStreamBuffer = pdFALSE;
+
+ lIndex = ( int32_t ) xStreamBuffer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xCallingTaskIsAuthorizedToAccessStreamBuffer = xPortIsAuthorizedToAccessKernelObject( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); /* Access-control check against the calling task's permissions. */
+
+ if( xCallingTaskIsAuthorizedToAccessStreamBuffer == pdTRUE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferNextMessageLengthBytes( xInternalStreamBufferHandle );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+/* Privileged only wrappers for Stream Buffer APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+
+ StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes,
+ size_t xTriggerLevelBytes,
+ BaseType_t xIsMessageBuffer,
+ StreamBufferCallbackFunction_t pxSendCompletedCallback,
+ StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) /* PRIVILEGED_FUNCTION */
+ { /* Privileged-only create: returns an opaque external handle (pool index), never the real pointer. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ StreamBufferHandle_t xExternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+
+ /**
+ * Stream buffer application level callback functionality is disabled for MPU
+ * enabled ports.
+ */
+ configASSERT( ( pxSendCompletedCallback == NULL ) &&
+ ( pxReceiveCompletedCallback == NULL ) );
+
+ if( ( pxSendCompletedCallback == NULL ) &&
+ ( pxReceiveCompletedCallback == NULL ) )
+ {
+ lIndex = MPU_GetFreeIndexInKernelObjectPool(); /* Reserve a slot first; released below if the kernel-level create fails. */
+
+ if( lIndex != -1 ) /* -1 => kernel object pool exhausted. */
+ {
+ xInternalStreamBufferHandle = xStreamBufferGenericCreate( xBufferSizeBytes,
+ xTriggerLevelBytes,
+ xIsMessageBuffer,
+ NULL,
+ NULL );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ MPU_StoreStreamBufferHandleAtIndex( lIndex, xInternalStreamBufferHandle );
+ xExternalStreamBufferHandle = ( StreamBufferHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); /* Hand back the opaque index as the handle. */
+ }
+ else
+ {
+ MPU_SetIndexFreeInKernelObjectPool( lIndex ); /* Roll back the slot reservation. */
+ }
+ }
+ }
+ else
+ {
+ traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer );
+ xExternalStreamBufferHandle = NULL;
+ }
+
+ return xExternalStreamBufferHandle;
+ }
+
+ #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+ StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes,
+ size_t xTriggerLevelBytes,
+ BaseType_t xIsMessageBuffer,
+ uint8_t * const pucStreamBufferStorageArea,
+ StaticStreamBuffer_t * const pxStaticStreamBuffer,
+ StreamBufferCallbackFunction_t pxSendCompletedCallback,
+ StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) /* PRIVILEGED_FUNCTION */
+ { /* Privileged-only static create: returns an opaque external handle (pool index), never the real pointer. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ StreamBufferHandle_t xExternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+
+ /**
+ * Stream buffer application level callback functionality is disabled for MPU
+ * enabled ports.
+ */
+ configASSERT( ( pxSendCompletedCallback == NULL ) &&
+ ( pxReceiveCompletedCallback == NULL ) );
+
+ if( ( pxSendCompletedCallback == NULL ) &&
+ ( pxReceiveCompletedCallback == NULL ) )
+ {
+ lIndex = MPU_GetFreeIndexInKernelObjectPool(); /* Reserve a slot first; released below if the kernel-level create fails. */
+
+ if( lIndex != -1 ) /* -1 => kernel object pool exhausted. */
+ {
+ xInternalStreamBufferHandle = xStreamBufferGenericCreateStatic( xBufferSizeBytes,
+ xTriggerLevelBytes,
+ xIsMessageBuffer,
+ pucStreamBufferStorageArea,
+ pxStaticStreamBuffer,
+ NULL,
+ NULL );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ MPU_StoreStreamBufferHandleAtIndex( lIndex, xInternalStreamBufferHandle );
+ xExternalStreamBufferHandle = ( StreamBufferHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); /* Hand back the opaque index as the handle. */
+ }
+ else
+ {
+ MPU_SetIndexFreeInKernelObjectPool( lIndex ); /* Roll back the slot reservation. */
+ }
+ }
+ }
+ else
+ {
+ traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xExternalStreamBufferHandle, xIsMessageBuffer ); /* FIX(review): was 'xReturn', which is not declared in this function — it only compiled while the trace macro was a no-op; pass the (NULL) external handle instead. */
+ xExternalStreamBufferHandle = NULL;
+ }
+
+ return xExternalStreamBufferHandle;
+ }
+
+ #endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+ void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+ { /* Privileged-only delete: destroys the kernel object and recycles the pool slot. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xStreamBuffer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ vStreamBufferDelete( xInternalStreamBufferHandle );
+ }
+
+ MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); /* Slot is recycled even when no internal handle was stored. */
+ }
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+ { /* Privileged-only reset via the opaque external handle. */
+ BaseType_t xReturn = pdFALSE; /* pdFALSE for an invalid or unknown handle. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xStreamBuffer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferReset( xInternalStreamBufferHandle );
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+ BaseType_t MPU_xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffers,
+ uint8_t ** ppucStreamBufferStorageArea,
+ StaticStreamBuffer_t ** ppxStaticStreamBuffer ) /* PRIVILEGED_FUNCTION */ /* FIX(review): out-params must be double pointers to match xStreamBufferGetStaticBuffers(); update the mpu_prototypes.h declaration to match. */
+ { /* Privileged-only accessor for the buffers backing a statically created stream buffer. */
+ BaseType_t xReturn = pdFALSE; /* pdFALSE for an invalid or unknown handle. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xStreamBuffers;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferGetStaticBuffers( xInternalStreamBufferHandle, ppucStreamBufferStorageArea, ppxStaticStreamBuffer ); /* FIX(review): previously called MPU_xStreamBufferGetStaticBuffers() — i.e. itself — causing unbounded recursion; forward to the kernel implementation instead. */
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+ { /* ISR-safe send via the opaque external handle. */
+ size_t xReturn = 0; /* 0 bytes sent for an invalid or unknown handle. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xStreamBuffer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) /* NOTE(review): FromISR wrappers skip the buffer and kernel-object permission checks done by MPU_xStreamBufferSendImpl — confirm intended. */
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferSendFromISR( xInternalStreamBufferHandle, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken );
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+ { /* ISR-safe receive via the opaque external handle. */
+ size_t xReturn = 0; /* 0 bytes received for an invalid or unknown handle. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xStreamBuffer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) /* NOTE(review): no buffer or kernel-object permission checks here, unlike MPU_xStreamBufferReceiveImpl — confirm intended. */
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferReceiveFromISR( xInternalStreamBufferHandle, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken );
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer,
+ BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+ { /* ISR-safe send-completed notification via the opaque external handle. */
+ BaseType_t xReturn = pdFALSE; /* pdFALSE for an invalid or unknown handle. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xStreamBuffer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferSendCompletedFromISR( xInternalStreamBufferHandle, pxHigherPriorityTaskWoken );
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer,
+ BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+ { /* ISR-safe receive-completed notification via the opaque external handle. */
+ BaseType_t xReturn = pdFALSE; /* pdFALSE for an invalid or unknown handle. */
+ StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xStreamBuffer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalStreamBufferHandle != NULL )
+ {
+ xReturn = xStreamBufferReceiveCompletedFromISR( xInternalStreamBufferHandle, pxHigherPriorityTaskWoken );
+ }
+ }
+
+ return xReturn;
+ }
+
+/*-----------------------------------------------------------*/
+
+/* Functions that the application writer wants to execute in privileged mode
+ * can be defined in application_defined_privileged_functions.h. */
+
+ #if configINCLUDE_APPLICATION_DEFINED_PRIVILEGED_FUNCTIONS == 1
+ #include "application_defined_privileged_functions.h"
+ #endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Array of system call implementation functions.
+ *
+ * The index in the array MUST match the corresponding system call number
+ * defined in mpu_wrappers.h.
+ */
+ PRIVILEGED_DATA UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ] = /* Function pointers stored as UBaseType_t — assumes they fit; NOTE(review): confirm per port. */
+ {
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+ ( UBaseType_t ) MPU_xTaskGenericNotifyImpl, /* SYSTEM_CALL_xTaskGenericNotify. */
+ ( UBaseType_t ) MPU_xTaskGenericNotifyWaitImpl, /* SYSTEM_CALL_xTaskGenericNotifyWait. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGenericNotify. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGenericNotifyWait. */
+ #endif
+
+ #if ( configUSE_TIMERS == 1 )
+ ( UBaseType_t ) MPU_xTimerGenericCommandImpl, /* SYSTEM_CALL_xTimerGenericCommand. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTimerGenericCommand. */
+ #endif
+
+ ( UBaseType_t ) MPU_xEventGroupWaitBitsImpl, /* SYSTEM_CALL_xEventGroupWaitBits. */
+
+ /* The system calls above this line take 5 parameters. */
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+ ( UBaseType_t ) MPU_xTaskDelayUntilImpl, /* SYSTEM_CALL_xTaskDelayUntil. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskDelayUntil. */
+ #endif
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+ ( UBaseType_t ) MPU_xTaskAbortDelayImpl, /* SYSTEM_CALL_xTaskAbortDelay. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskAbortDelay. */
+ #endif
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+ ( UBaseType_t ) MPU_vTaskDelayImpl, /* SYSTEM_CALL_vTaskDelay. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_vTaskDelay. */
+ #endif
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+ ( UBaseType_t ) MPU_uxTaskPriorityGetImpl, /* SYSTEM_CALL_uxTaskPriorityGet. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_uxTaskPriorityGet. */
+ #endif
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+ ( UBaseType_t ) MPU_eTaskGetStateImpl, /* SYSTEM_CALL_eTaskGetState. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_eTaskGetState. */
+ #endif
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+ ( UBaseType_t ) MPU_vTaskGetInfoImpl, /* SYSTEM_CALL_vTaskGetInfo. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_vTaskGetInfo. */
+ #endif
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+ ( UBaseType_t ) MPU_xTaskGetIdleTaskHandleImpl, /* SYSTEM_CALL_xTaskGetIdleTaskHandle. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGetIdleTaskHandle. */
+ #endif
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+ ( UBaseType_t ) MPU_vTaskSuspendImpl, /* SYSTEM_CALL_vTaskSuspend. */
+ ( UBaseType_t ) MPU_vTaskResumeImpl, /* SYSTEM_CALL_vTaskResume. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_vTaskSuspend. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_vTaskResume. */
+ #endif
+
+ ( UBaseType_t ) MPU_xTaskGetTickCountImpl, /* SYSTEM_CALL_xTaskGetTickCount. */
+ ( UBaseType_t ) MPU_uxTaskGetNumberOfTasksImpl, /* SYSTEM_CALL_uxTaskGetNumberOfTasks. */
+ ( UBaseType_t ) MPU_pcTaskGetNameImpl, /* SYSTEM_CALL_pcTaskGetName. */
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+ ( UBaseType_t ) MPU_ulTaskGetRunTimeCounterImpl, /* SYSTEM_CALL_ulTaskGetRunTimeCounter. */
+ ( UBaseType_t ) MPU_ulTaskGetRunTimePercentImpl, /* SYSTEM_CALL_ulTaskGetRunTimePercent. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_ulTaskGetRunTimeCounter. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_ulTaskGetRunTimePercent. */
+ #endif
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+ ( UBaseType_t ) MPU_ulTaskGetIdleRunTimePercentImpl, /* SYSTEM_CALL_ulTaskGetIdleRunTimePercent. */
+ ( UBaseType_t ) MPU_ulTaskGetIdleRunTimeCounterImpl, /* SYSTEM_CALL_ulTaskGetIdleRunTimeCounter. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_ulTaskGetIdleRunTimePercent. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_ulTaskGetIdleRunTimeCounter. */
+ #endif
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+ ( UBaseType_t ) MPU_vTaskSetApplicationTaskTagImpl, /* SYSTEM_CALL_vTaskSetApplicationTaskTag. */
+ ( UBaseType_t ) MPU_xTaskGetApplicationTaskTagImpl, /* SYSTEM_CALL_xTaskGetApplicationTaskTag. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_vTaskSetApplicationTaskTag. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGetApplicationTaskTag. */
+ #endif
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+ ( UBaseType_t ) MPU_vTaskSetThreadLocalStoragePointerImpl, /* SYSTEM_CALL_vTaskSetThreadLocalStoragePointer. */
+ ( UBaseType_t ) MPU_pvTaskGetThreadLocalStoragePointerImpl, /* SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_vTaskSetThreadLocalStoragePointer. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer. */
+ #endif
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+ ( UBaseType_t ) MPU_uxTaskGetSystemStateImpl, /* SYSTEM_CALL_uxTaskGetSystemState. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_uxTaskGetSystemState. */
+ #endif
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+ ( UBaseType_t ) MPU_uxTaskGetStackHighWaterMarkImpl, /* SYSTEM_CALL_uxTaskGetStackHighWaterMark. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_uxTaskGetStackHighWaterMark. */
+ #endif
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+ ( UBaseType_t ) MPU_uxTaskGetStackHighWaterMark2Impl, /* SYSTEM_CALL_uxTaskGetStackHighWaterMark2. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_uxTaskGetStackHighWaterMark2. */
+ #endif
+
+ #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+ ( UBaseType_t ) MPU_xTaskGetCurrentTaskHandleImpl, /* SYSTEM_CALL_xTaskGetCurrentTaskHandle. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGetCurrentTaskHandle. */
+ #endif
+
+ #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+ ( UBaseType_t ) MPU_xTaskGetSchedulerStateImpl, /* SYSTEM_CALL_xTaskGetSchedulerState. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGetSchedulerState. */
+ #endif
+
+ ( UBaseType_t ) MPU_vTaskSetTimeOutStateImpl, /* SYSTEM_CALL_vTaskSetTimeOutState. */
+ ( UBaseType_t ) MPU_xTaskCheckForTimeOutImpl, /* SYSTEM_CALL_xTaskCheckForTimeOut. */
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+ ( UBaseType_t ) MPU_ulTaskGenericNotifyTakeImpl, /* SYSTEM_CALL_ulTaskGenericNotifyTake. */
+ ( UBaseType_t ) MPU_xTaskGenericNotifyStateClearImpl, /* SYSTEM_CALL_xTaskGenericNotifyStateClear. */
+ ( UBaseType_t ) MPU_ulTaskGenericNotifyValueClearImpl, /* SYSTEM_CALL_ulTaskGenericNotifyValueClear. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_ulTaskGenericNotifyTake. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGenericNotifyStateClear. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_ulTaskGenericNotifyValueClear. */
+ #endif
+
+ ( UBaseType_t ) MPU_xQueueGenericSendImpl, /* SYSTEM_CALL_xQueueGenericSend. */
+ ( UBaseType_t ) MPU_uxQueueMessagesWaitingImpl, /* SYSTEM_CALL_uxQueueMessagesWaiting. */
+ ( UBaseType_t ) MPU_uxQueueSpacesAvailableImpl, /* SYSTEM_CALL_uxQueueSpacesAvailable. */
+ ( UBaseType_t ) MPU_xQueueReceiveImpl, /* SYSTEM_CALL_xQueueReceive. */
+ ( UBaseType_t ) MPU_xQueuePeekImpl, /* SYSTEM_CALL_xQueuePeek. */
+ ( UBaseType_t ) MPU_xQueueSemaphoreTakeImpl, /* SYSTEM_CALL_xQueueSemaphoreTake. */
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+ ( UBaseType_t ) MPU_xQueueGetMutexHolderImpl, /* SYSTEM_CALL_xQueueGetMutexHolder. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xQueueGetMutexHolder. */
+ #endif
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+ ( UBaseType_t ) MPU_xQueueTakeMutexRecursiveImpl, /* SYSTEM_CALL_xQueueTakeMutexRecursive. */
+ ( UBaseType_t ) MPU_xQueueGiveMutexRecursiveImpl, /* SYSTEM_CALL_xQueueGiveMutexRecursive. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xQueueTakeMutexRecursive. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xQueueGiveMutexRecursive. */
+ #endif
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+ ( UBaseType_t ) MPU_xQueueSelectFromSetImpl, /* SYSTEM_CALL_xQueueSelectFromSet. */
+ ( UBaseType_t ) MPU_xQueueAddToSetImpl, /* SYSTEM_CALL_xQueueAddToSet. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xQueueSelectFromSet. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xQueueAddToSet. */
+ #endif
+
+ #if configQUEUE_REGISTRY_SIZE > 0
+ ( UBaseType_t ) MPU_vQueueAddToRegistryImpl, /* SYSTEM_CALL_vQueueAddToRegistry. */
+ ( UBaseType_t ) MPU_vQueueUnregisterQueueImpl, /* SYSTEM_CALL_vQueueUnregisterQueue. */
+ ( UBaseType_t ) MPU_pcQueueGetNameImpl, /* SYSTEM_CALL_pcQueueGetName. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_vQueueAddToRegistry. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_vQueueUnregisterQueue. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_pcQueueGetName. */
+ #endif
+
+ #if ( configUSE_TIMERS == 1 )
+ ( UBaseType_t ) MPU_pvTimerGetTimerIDImpl, /* SYSTEM_CALL_pvTimerGetTimerID. */
+ ( UBaseType_t ) MPU_vTimerSetTimerIDImpl, /* SYSTEM_CALL_vTimerSetTimerID. */
+ ( UBaseType_t ) MPU_xTimerIsTimerActiveImpl, /* SYSTEM_CALL_xTimerIsTimerActive. */
+ ( UBaseType_t ) MPU_xTimerGetTimerDaemonTaskHandleImpl, /* SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle. */
+ ( UBaseType_t ) MPU_pcTimerGetNameImpl, /* SYSTEM_CALL_pcTimerGetName. */
+ ( UBaseType_t ) MPU_vTimerSetReloadModeImpl, /* SYSTEM_CALL_vTimerSetReloadMode. */
+ ( UBaseType_t ) MPU_xTimerGetReloadModeImpl, /* SYSTEM_CALL_xTimerGetReloadMode. */
+ ( UBaseType_t ) MPU_uxTimerGetReloadModeImpl, /* SYSTEM_CALL_uxTimerGetReloadMode. */
+ ( UBaseType_t ) MPU_xTimerGetPeriodImpl, /* SYSTEM_CALL_xTimerGetPeriod. */
+ ( UBaseType_t ) MPU_xTimerGetExpiryTimeImpl, /* SYSTEM_CALL_xTimerGetExpiryTime. */
+ #else /* if ( configUSE_TIMERS == 1 ) */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_pvTimerGetTimerID. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_vTimerSetTimerID. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTimerIsTimerActive. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_pcTimerGetName. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_vTimerSetReloadMode. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTimerGetReloadMode. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_uxTimerGetReloadMode. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTimerGetPeriod. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_xTimerGetExpiryTime. */
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+
+ ( UBaseType_t ) MPU_xEventGroupClearBitsImpl, /* SYSTEM_CALL_xEventGroupClearBits. */
+ ( UBaseType_t ) MPU_xEventGroupSetBitsImpl, /* SYSTEM_CALL_xEventGroupSetBits. */
+ ( UBaseType_t ) MPU_xEventGroupSyncImpl, /* SYSTEM_CALL_xEventGroupSync. */
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+ ( UBaseType_t ) MPU_uxEventGroupGetNumberImpl, /* SYSTEM_CALL_uxEventGroupGetNumber. */
+ ( UBaseType_t ) MPU_vEventGroupSetNumberImpl, /* SYSTEM_CALL_vEventGroupSetNumber. */
+ #else
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_uxEventGroupGetNumber. */
+ ( UBaseType_t ) 0, /* SYSTEM_CALL_vEventGroupSetNumber. */
+ #endif
+
+ ( UBaseType_t ) MPU_xStreamBufferSendImpl, /* SYSTEM_CALL_xStreamBufferSend. */
+ ( UBaseType_t ) MPU_xStreamBufferReceiveImpl, /* SYSTEM_CALL_xStreamBufferReceive. */
+ ( UBaseType_t ) MPU_xStreamBufferIsFullImpl, /* SYSTEM_CALL_xStreamBufferIsFull. */
+ ( UBaseType_t ) MPU_xStreamBufferIsEmptyImpl, /* SYSTEM_CALL_xStreamBufferIsEmpty. */
+ ( UBaseType_t ) MPU_xStreamBufferSpacesAvailableImpl, /* SYSTEM_CALL_xStreamBufferSpacesAvailable. */
+ ( UBaseType_t ) MPU_xStreamBufferBytesAvailableImpl, /* SYSTEM_CALL_xStreamBufferBytesAvailable. */
+ ( UBaseType_t ) MPU_xStreamBufferSetTriggerLevelImpl, /* SYSTEM_CALL_xStreamBufferSetTriggerLevel. */
+ ( UBaseType_t ) MPU_xStreamBufferNextMessageLengthBytesImpl /* SYSTEM_CALL_xStreamBufferNextMessageLengthBytes. */
+ };
+/*-----------------------------------------------------------*/
+
+#endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CA9/port.c b/Source/portable/GCC/ARM_CA9/port.c
new file mode 100644
index 0000000..79fa32d
--- /dev/null
+++ b/Source/portable/GCC/ARM_CA9/port.c
@@ -0,0 +1,570 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdlib.h>
+#include <string.h>
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+#ifndef configINTERRUPT_CONTROLLER_BASE_ADDRESS
+ #error configINTERRUPT_CONTROLLER_BASE_ADDRESS must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif
+
+#ifndef configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET
+ #error configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif
+
+#ifndef configUNIQUE_INTERRUPT_PRIORITIES
+ #error configUNIQUE_INTERRUPT_PRIORITIES must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif
+
+#ifndef configSETUP_TICK_INTERRUPT
+ #error configSETUP_TICK_INTERRUPT() must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif /* configSETUP_TICK_INTERRUPT */
+
+#ifndef configMAX_API_CALL_INTERRUPT_PRIORITY
+ #error configMAX_API_CALL_INTERRUPT_PRIORITY must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif
+
+#if configMAX_API_CALL_INTERRUPT_PRIORITY == 0
+ #error configMAX_API_CALL_INTERRUPT_PRIORITY must not be set to 0
+#endif
+
+#if configMAX_API_CALL_INTERRUPT_PRIORITY > configUNIQUE_INTERRUPT_PRIORITIES
+ #error configMAX_API_CALL_INTERRUPT_PRIORITY must be less than or equal to configUNIQUE_INTERRUPT_PRIORITIES as the lower the numeric priority value the higher the logical interrupt priority
+#endif
+
+#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
+ /* Check the configuration. */
+ #if( configMAX_PRIORITIES > 32 )
+ #error configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32. It is very rare that a system requires more than 10 to 15 difference priorities as tasks that share a priority will time slice.
+ #endif
+#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
+
+/* In case security extensions are implemented. */
+#if configMAX_API_CALL_INTERRUPT_PRIORITY <= ( configUNIQUE_INTERRUPT_PRIORITIES / 2 )
+ #error configMAX_API_CALL_INTERRUPT_PRIORITY must be greater than ( configUNIQUE_INTERRUPT_PRIORITIES / 2 )
+#endif
+
+/* Some vendor specific files default configCLEAR_TICK_INTERRUPT() in
+portmacro.h. */
+#ifndef configCLEAR_TICK_INTERRUPT
+ #define configCLEAR_TICK_INTERRUPT()
+#endif
+
+/* A critical section is exited when the critical section nesting count reaches
+this value. */
+#define portNO_CRITICAL_NESTING ( ( uint32_t ) 0 )
+
+/* In all GICs 255 can be written to the priority mask register to unmask all
+(but the lowest) interrupt priority. */
+#define portUNMASK_VALUE ( 0xFFUL )
+
+/* Tasks are not created with a floating point context, but can be given a
+floating point context after they have been created. A variable is stored as
+part of the tasks context that holds portNO_FLOATING_POINT_CONTEXT if the task
+does not have an FPU context, or any other value if the task does have an FPU
+context. */
+#define portNO_FLOATING_POINT_CONTEXT ( ( StackType_t ) 0 )
+
+/* Constants required to setup the initial task context. */
+#define portINITIAL_SPSR ( ( StackType_t ) 0x1f ) /* System mode, ARM mode, IRQ enabled FIQ enabled. */
+#define portTHUMB_MODE_BIT ( ( StackType_t ) 0x20 )
+#define portINTERRUPT_ENABLE_BIT ( 0x80UL )
+#define portTHUMB_MODE_ADDRESS ( 0x01UL )
+
+/* Used by portASSERT_IF_INTERRUPT_PRIORITY_INVALID() when ensuring the binary
+point is zero. */
+#define portBINARY_POINT_BITS ( ( uint8_t ) 0x03 )
+
+/* Masks all bits in the APSR other than the mode bits. */
+#define portAPSR_MODE_BITS_MASK ( 0x1F )
+
+/* The value of the mode bits in the APSR when the CPU is executing in user
+mode. */
+#define portAPSR_USER_MODE ( 0x10 )
+
+/* The critical section macros only mask interrupts up to an application
+determined priority level. Sometimes it is necessary to turn interrupt off in
+the CPU itself before modifying certain hardware registers. */
+#define portCPU_IRQ_DISABLE() \
+ __asm volatile ( "CPSID i" ::: "memory" ); \
+ __asm volatile ( "DSB" ); \
+ __asm volatile ( "ISB" );
+
+#define portCPU_IRQ_ENABLE() \
+ __asm volatile ( "CPSIE i" ::: "memory" ); \
+ __asm volatile ( "DSB" ); \
+ __asm volatile ( "ISB" );
+
+
+/* Macro to unmask all interrupt priorities. */
+#define portCLEAR_INTERRUPT_MASK() \
+{ \
+ portCPU_IRQ_DISABLE(); \
+ portICCPMR_PRIORITY_MASK_REGISTER = portUNMASK_VALUE; \
+ __asm volatile ( "DSB \n" \
+ "ISB \n" ); \
+ portCPU_IRQ_ENABLE(); \
+}
+
+#define portINTERRUPT_PRIORITY_REGISTER_OFFSET 0x400UL
+#define portMAX_8_BIT_VALUE ( ( uint8_t ) 0xff )
+#define portBIT_0_SET ( ( uint8_t ) 0x01 )
+
+/* Let the user override the pre-loading of the initial LR with the address of
+prvTaskExitError() in case it messes up unwinding of the stack in the
+debugger. */
+#ifdef configTASK_RETURN_ADDRESS
+ #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
+#else
+ #define portTASK_RETURN_ADDRESS prvTaskExitError
+#endif
+
+/* The space on the stack required to hold the FPU registers. This is 32 64-bit
+registers, plus a 32-bit status register. */
+#define portFPU_REGISTER_WORDS ( ( 32 * 2 ) + 1 )
+
+/*-----------------------------------------------------------*/
+
+/*
+ * Starts the first task executing. This function is necessarily written in
+ * assembly code so is implemented in portASM.s.
+ */
+extern void vPortRestoreTaskContext( void );
+
+/*
+ * Used to catch tasks that attempt to return from their implementing function.
+ */
+static void prvTaskExitError( void );
+
+/*
+ * If the application provides an implementation of vApplicationIRQHandler(),
+ * then it will get called directly without saving the FPU registers on
+ * interrupt entry, and this weak implementation of
+ * vApplicationFPUSafeIRQHandler() is just provided to remove linkage errors -
+ * it should never actually get called so its implementation contains a
+ * call to configASSERT() that will always fail.
+ *
+ * If the application provides its own implementation of
+ * vApplicationFPUSafeIRQHandler() then the implementation of
+ * vApplicationIRQHandler() provided in portASM.S will save the FPU registers
+ * before calling it.
+ *
+ * Therefore, if the application writer wants FPU registers to be saved on
+ * interrupt entry their IRQ handler must be called
+ * vApplicationFPUSafeIRQHandler(), and if the application writer does not want
+ * FPU registers to be saved on interrupt entry their IRQ handler must be
+ * called vApplicationIRQHandler().
+ */
+void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR ) __attribute__((weak) );
+
+/*-----------------------------------------------------------*/
+
+/* A variable is used to keep track of the critical section nesting. This
+variable has to be stored as part of the task context and must be initialised to
+a non zero value to ensure interrupts don't inadvertently become unmasked before
+the scheduler starts. As it is stored as part of the task context it will
+automatically be set to 0 when the first task is started. */
+volatile uint32_t ulCriticalNesting = 9999UL;
+
+/* Saved as part of the task context. If ulPortTaskHasFPUContext is non-zero then
+a floating point context must be saved and restored for the task. */
+volatile uint32_t ulPortTaskHasFPUContext = pdFALSE;
+
+/* Set to 1 to pend a context switch from an ISR. */
+volatile uint32_t ulPortYieldRequired = pdFALSE;
+
+/* Counts the interrupt nesting depth. A context switch is only performed
+if the nesting depth is 0. */
+volatile uint32_t ulPortInterruptNesting = 0UL;
+
+/* Used in the asm file. */
+__attribute__(( used )) const uint32_t ulICCIAR = portICCIAR_INTERRUPT_ACKNOWLEDGE_REGISTER_ADDRESS;
+__attribute__(( used )) const uint32_t ulICCEOIR = portICCEOIR_END_OF_INTERRUPT_REGISTER_ADDRESS;
+__attribute__(( used )) const uint32_t ulICCPMR = portICCPMR_PRIORITY_MASK_REGISTER_ADDRESS;
+__attribute__(( used )) const uint32_t ulMaxAPIPriorityMask = ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
+
+/*-----------------------------------------------------------*/
+
+/*
+ * See header file for description.
+ */
+StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
+{
+ /* Setup the initial stack of the task. The stack is set exactly as
+ expected by the portRESTORE_CONTEXT() macro.
+
+	The first real value on the stack is the status register, which is set for
+ system mode, with interrupts enabled. A few NULLs are added first to ensure
+ GDB does not try decoding a non-existent return address. */
+ *pxTopOfStack = ( StackType_t ) NULL;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) NULL;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) NULL;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portINITIAL_SPSR;
+
+ if( ( ( uint32_t ) pxCode & portTHUMB_MODE_ADDRESS ) != 0x00UL )
+ {
+ /* The task will start in THUMB mode. */
+ *pxTopOfStack |= portTHUMB_MODE_BIT;
+ }
+
+ pxTopOfStack--;
+
+ /* Next the return address, which in this case is the start of the task. */
+ *pxTopOfStack = ( StackType_t ) pxCode;
+ pxTopOfStack--;
+
+ /* Next all the registers other than the stack pointer. */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* R14 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x12121212; /* R12 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x11111111; /* R11 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x10101010; /* R10 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x09090909; /* R9 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x08080808; /* R8 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x07070707; /* R7 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x06060606; /* R6 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x05050505; /* R5 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x04040404; /* R4 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x03030303; /* R3 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x02020202; /* R2 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x01010101; /* R1 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ pxTopOfStack--;
+
+ /* The task will start with a critical nesting count of 0 as interrupts are
+ enabled. */
+ *pxTopOfStack = portNO_CRITICAL_NESTING;
+
+ #if( configUSE_TASK_FPU_SUPPORT == 1 )
+ {
+ /* The task will start without a floating point context. A task that
+ uses the floating point hardware must call vPortTaskUsesFPU() before
+ executing any floating point instructions. */
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_FLOATING_POINT_CONTEXT;
+ }
+ #elif( configUSE_TASK_FPU_SUPPORT == 2 )
+ {
+ /* The task will start with a floating point context. Leave enough
+ space for the registers - and ensure they are initialised to 0. */
+ pxTopOfStack -= portFPU_REGISTER_WORDS;
+ memset( pxTopOfStack, 0x00, portFPU_REGISTER_WORDS * sizeof( StackType_t ) );
+
+ pxTopOfStack--;
+ *pxTopOfStack = pdTRUE;
+ ulPortTaskHasFPUContext = pdTRUE;
+ }
+ #else
+ {
+ #error Invalid configUSE_TASK_FPU_SUPPORT setting - configUSE_TASK_FPU_SUPPORT must be set to 1, 2, or left undefined.
+ }
+ #endif
+
+ return pxTopOfStack;
+}
+/*-----------------------------------------------------------*/
+
+static void prvTaskExitError( void )
+{
+ /* A function that implements a task must not exit or attempt to return to
+ its caller as there is nothing to return to. If a task wants to exit it
+ should instead call vTaskDelete( NULL ).
+
+ Artificially force an assert() to be triggered if configASSERT() is
+ defined, then stop here so application writers can catch the error. */
+ configASSERT( ulPortInterruptNesting == ~0UL );
+ portDISABLE_INTERRUPTS();
+ for( ;; );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortStartScheduler( void )
+{
+uint32_t ulAPSR;
+
+ #if( configASSERT_DEFINED == 1 )
+ {
+ volatile uint8_t ucOriginalPriority;
+ volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + portINTERRUPT_PRIORITY_REGISTER_OFFSET );
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine how many priority bits are implemented in the GIC.
+
+ Save the interrupt priority value that is about to be clobbered. */
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
+
+ /* Determine the number of priority bits available. First write to
+ all possible bits. */
+ *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = *pucFirstUserPriorityRegister;
+
+ /* Shift to the least significant bits. */
+ while( ( ucMaxPriorityValue & portBIT_0_SET ) != portBIT_0_SET )
+ {
+ ucMaxPriorityValue >>= ( uint8_t ) 0x01;
+ }
+
+ /* Sanity check configUNIQUE_INTERRUPT_PRIORITIES matches the read
+ value. */
+ configASSERT( ucMaxPriorityValue == portLOWEST_INTERRUPT_PRIORITY );
+
+ /* Restore the clobbered interrupt priority register to its original
+ value. */
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
+ }
+ #endif /* configASSERT_DEFINED */
+
+
+ /* Only continue if the CPU is not in User mode. The CPU must be in a
+ Privileged mode for the scheduler to start. */
+ __asm volatile ( "MRS %0, APSR" : "=r" ( ulAPSR ) :: "memory" );
+ ulAPSR &= portAPSR_MODE_BITS_MASK;
+ configASSERT( ulAPSR != portAPSR_USER_MODE );
+
+ if( ulAPSR != portAPSR_USER_MODE )
+ {
+ /* Only continue if the binary point value is set to its lowest possible
+ setting. See the comments in vPortValidateInterruptPriority() below for
+ more information. */
+ configASSERT( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE );
+
+ if( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE )
+ {
+ /* Interrupts are turned off in the CPU itself to ensure tick does
+ not execute while the scheduler is being started. Interrupts are
+ automatically turned back on in the CPU when the first task starts
+ executing. */
+ portCPU_IRQ_DISABLE();
+
+ /* Start the timer that generates the tick ISR. */
+ configSETUP_TICK_INTERRUPT();
+
+ /* Start the first task executing. */
+ vPortRestoreTaskContext();
+ }
+ }
+
+ /* Will only get here if vTaskStartScheduler() was called with the CPU in
+ a non-privileged mode or the binary point register was not set to its lowest
+ possible value. prvTaskExitError() is referenced to prevent a compiler
+ warning about it being defined but not referenced in the case that the user
+ defines their own exit address. */
+ ( void ) prvTaskExitError;
+ return 0;
+}
+/*-----------------------------------------------------------*/
+
+void vPortEndScheduler( void )
+{
+ /* Not implemented in ports where there is nothing to return to.
+ Artificially force an assert. */
+ configASSERT( ulCriticalNesting == 1000UL );
+}
+/*-----------------------------------------------------------*/
+
+void vPortEnterCritical( void )
+{
+ /* Mask interrupts up to the max syscall interrupt priority. */
+ ulPortSetInterruptMask();
+
+ /* Now interrupts are disabled ulCriticalNesting can be accessed
+ directly. Increment ulCriticalNesting to keep a count of how many times
+ portENTER_CRITICAL() has been called. */
+ ulCriticalNesting++;
+
+ /* This is not the interrupt safe version of the enter critical function so
+ assert() if it is being called from an interrupt context. Only API
+ functions that end in "FromISR" can be used in an interrupt. Only assert if
+ the critical nesting count is 1 to protect against recursive calls if the
+ assert function also uses a critical section. */
+ if( ulCriticalNesting == 1 )
+ {
+ configASSERT( ulPortInterruptNesting == 0 );
+ }
+}
+/*-----------------------------------------------------------*/
+
+void vPortExitCritical( void )
+{
+ if( ulCriticalNesting > portNO_CRITICAL_NESTING )
+ {
+ /* Decrement the nesting count as the critical section is being
+ exited. */
+ ulCriticalNesting--;
+
+ /* If the nesting level has reached zero then all interrupt
+ priorities must be re-enabled. */
+ if( ulCriticalNesting == portNO_CRITICAL_NESTING )
+ {
+ /* Critical nesting has reached zero so all interrupt priorities
+ should be unmasked. */
+ portCLEAR_INTERRUPT_MASK();
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+void FreeRTOS_Tick_Handler( void )
+{
+ /* Set interrupt mask before altering scheduler structures. The tick
+ handler runs at the lowest priority, so interrupts cannot already be masked,
+ so there is no need to save and restore the current mask value. It is
+ necessary to turn off interrupts in the CPU itself while the ICCPMR is being
+ updated. */
+ portCPU_IRQ_DISABLE();
+ portICCPMR_PRIORITY_MASK_REGISTER = ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
+ __asm volatile ( "dsb \n"
+ "isb \n" ::: "memory" );
+ portCPU_IRQ_ENABLE();
+
+ /* Increment the RTOS tick. */
+ if( xTaskIncrementTick() != pdFALSE )
+ {
+ ulPortYieldRequired = pdTRUE;
+ }
+
+ /* Ensure all interrupt priorities are active again. */
+ portCLEAR_INTERRUPT_MASK();
+ configCLEAR_TICK_INTERRUPT();
+}
+/*-----------------------------------------------------------*/
+
+#if( configUSE_TASK_FPU_SUPPORT != 2 )
+
+ void vPortTaskUsesFPU( void )
+ {
+ uint32_t ulInitialFPSCR = 0;
+
+ /* A task is registering the fact that it needs an FPU context. Set the
+ FPU flag (which is saved as part of the task context). */
+ ulPortTaskHasFPUContext = pdTRUE;
+
+ /* Initialise the floating point status register. */
+ __asm volatile ( "FMXR FPSCR, %0" :: "r" (ulInitialFPSCR) : "memory" );
+ }
+
+#endif /* configUSE_TASK_FPU_SUPPORT */
+/*-----------------------------------------------------------*/
+
+void vPortClearInterruptMask( uint32_t ulNewMaskValue )
+{
+ if( ulNewMaskValue == pdFALSE )
+ {
+ portCLEAR_INTERRUPT_MASK();
+ }
+}
+/*-----------------------------------------------------------*/
+
+uint32_t ulPortSetInterruptMask( void )
+{
+uint32_t ulReturn;
+
+	/* Interrupts in the CPU must be turned off while the ICCPMR is being
+ updated. */
+ portCPU_IRQ_DISABLE();
+ if( portICCPMR_PRIORITY_MASK_REGISTER == ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) )
+ {
+ /* Interrupts were already masked. */
+ ulReturn = pdTRUE;
+ }
+ else
+ {
+ ulReturn = pdFALSE;
+ portICCPMR_PRIORITY_MASK_REGISTER = ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
+ __asm volatile ( "dsb \n"
+ "isb \n" ::: "memory" );
+ }
+ portCPU_IRQ_ENABLE();
+
+ return ulReturn;
+}
+/*-----------------------------------------------------------*/
+
+#if( configASSERT_DEFINED == 1 )
+
+ void vPortValidateInterruptPriority( void )
+ {
+ /* The following assertion will fail if a service routine (ISR) for
+ an interrupt that has been assigned a priority above
+ configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+ function. ISR safe FreeRTOS API functions must *only* be called
+ from interrupts that have been assigned a priority at or below
+ configMAX_SYSCALL_INTERRUPT_PRIORITY.
+
+ Numerically low interrupt priority numbers represent logically high
+ interrupt priorities, therefore the priority of the interrupt must
+ be set to a value equal to or numerically *higher* than
+ configMAX_SYSCALL_INTERRUPT_PRIORITY.
+
+ FreeRTOS maintains separate thread and ISR API functions to ensure
+ interrupt entry is as fast and simple as possible. */
+ configASSERT( portICCRPR_RUNNING_PRIORITY_REGISTER >= ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) );
+
+ /* Priority grouping: The interrupt controller (GIC) allows the bits
+ that define each interrupt's priority to be split between bits that
+ define the interrupt's pre-emption priority bits and bits that define
+ the interrupt's sub-priority. For simplicity all bits must be defined
+ to be pre-emption priority bits. The following assertion will fail if
+ this is not the case (if some bits represent a sub-priority).
+
+ The priority grouping is configured by the GIC's binary point register
+		(ICCBPR). Writing 0 to ICCBPR will ensure it is set to its lowest
+ possible value (which may be above 0). */
+ configASSERT( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE );
+ }
+
+#endif /* configASSERT_DEFINED */
+/*-----------------------------------------------------------*/
+
+void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR )
+{
+ ( void ) ulICCIAR;
+ configASSERT( ( volatile void * ) NULL );
+}
diff --git a/Source/portable/GCC/ARM_CA9/portASM.S b/Source/portable/GCC/ARM_CA9/portASM.S
new file mode 100644
index 0000000..c9eb16d
--- /dev/null
+++ b/Source/portable/GCC/ARM_CA9/portASM.S
@@ -0,0 +1,319 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+ .eabi_attribute Tag_ABI_align_preserved, 1
+ .text
+ .arm
+
+ .set SYS_MODE, 0x1f
+ .set SVC_MODE, 0x13
+ .set IRQ_MODE, 0x12
+
+ /* Hardware registers. */
+ .extern ulICCIAR
+ .extern ulICCEOIR
+ .extern ulICCPMR
+
+ /* Variables and functions. */
+ .extern ulMaxAPIPriorityMask
+ .extern _freertos_vector_table
+ .extern pxCurrentTCB
+ .extern vTaskSwitchContext
+ .extern vApplicationIRQHandler
+ .extern ulPortInterruptNesting
+ .extern ulPortTaskHasFPUContext
+
+ .global FreeRTOS_IRQ_Handler
+ .global FreeRTOS_SWI_Handler
+ .global vPortRestoreTaskContext
+
+
+
+
+.macro portSAVE_CONTEXT
+
+ /* Save the LR and SPSR onto the system mode stack before switching to
+ system mode to save the remaining system mode registers. */
+ SRSDB sp!, #SYS_MODE
+ CPS #SYS_MODE
+ PUSH {R0-R12, R14}
+
+ /* Push the critical nesting count. */
+ LDR R2, ulCriticalNestingConst
+ LDR R1, [R2]
+ PUSH {R1}
+
+ /* Does the task have a floating point context that needs saving? If
+ ulPortTaskHasFPUContext is 0 then no. */
+ LDR R2, ulPortTaskHasFPUContextConst
+ LDR R3, [R2]
+ CMP R3, #0
+
+ /* Save the floating point context, if any. */
+ FMRXNE R1, FPSCR
+ VPUSHNE {D0-D15}
+ VPUSHNE {D16-D31}
+ PUSHNE {R1}
+
+ /* Save ulPortTaskHasFPUContext itself. */
+ PUSH {R3}
+
+ /* Save the stack pointer in the TCB. */
+ LDR R0, pxCurrentTCBConst
+ LDR R1, [R0]
+ STR SP, [R1]
+
+ .endm
+
+; /**********************************************************************/
+
+.macro portRESTORE_CONTEXT
+
+ /* Set the SP to point to the stack of the task being restored. */
+ LDR R0, pxCurrentTCBConst
+ LDR R1, [R0]
+ LDR SP, [R1]
+
+ /* Is there a floating point context to restore? If the restored
+ ulPortTaskHasFPUContext is zero then no. */
+ LDR R0, ulPortTaskHasFPUContextConst
+ POP {R1}
+ STR R1, [R0]
+ CMP R1, #0
+
+ /* Restore the floating point context, if any. */
+ POPNE {R0}
+ VPOPNE {D16-D31}
+ VPOPNE {D0-D15}
+ VMSRNE FPSCR, R0
+
+ /* Restore the critical section nesting depth. */
+ LDR R0, ulCriticalNestingConst
+ POP {R1}
+ STR R1, [R0]
+
+ /* Ensure the priority mask is correct for the critical nesting depth. */
+ LDR R2, ulICCPMRConst
+ LDR R2, [R2]
+ CMP R1, #0
+ MOVEQ R4, #255
+ LDRNE R4, ulMaxAPIPriorityMaskConst
+ LDRNE R4, [R4]
+ STR R4, [R2]
+
+ /* Restore all system mode registers other than the SP (which is already
+ being used). */
+ POP {R0-R12, R14}
+
+ /* Return to the task code, loading CPSR on the way. */
+ RFEIA sp!
+
+ .endm
+
+
+
+
+/******************************************************************************
+ * SVC handler is used to start the scheduler.
+ *****************************************************************************/
+.align 4
+.type FreeRTOS_SWI_Handler, %function
+FreeRTOS_SWI_Handler:
+ /* Save the context of the current task and select a new task to run. */
+ portSAVE_CONTEXT
+ LDR R0, vTaskSwitchContextConst
+ BLX R0
+ portRESTORE_CONTEXT
+
+
+/******************************************************************************
+ * vPortRestoreTaskContext is used to start the scheduler.
+ *****************************************************************************/
+.type vPortRestoreTaskContext, %function
+vPortRestoreTaskContext:
+ /* Switch to system mode. */
+ CPS #SYS_MODE
+ portRESTORE_CONTEXT
+
+.align 4
+.type FreeRTOS_IRQ_Handler, %function
+FreeRTOS_IRQ_Handler:
+ /* Return to the interrupted instruction. */
+ SUB lr, lr, #4
+
+ /* Push the return address and SPSR. */
+ PUSH {lr}
+ MRS lr, SPSR
+ PUSH {lr}
+
+ /* Change to supervisor mode to allow reentry. */
+ CPS #SVC_MODE
+
+ /* Push used registers. */
+ PUSH {r0-r4, r12}
+
+ /* Increment nesting count. r3 holds the address of ulPortInterruptNesting
+ for future use. r1 holds the original ulPortInterruptNesting value for
+ future use. */
+ LDR r3, ulPortInterruptNestingConst
+ LDR r1, [r3]
+ ADD r4, r1, #1
+ STR r4, [r3]
+
+ /* Read value from the interrupt acknowledge register, which is stored in r0
+ for future parameter and interrupt clearing use. */
+ LDR r2, ulICCIARConst
+ LDR r2, [r2]
+ LDR r0, [r2]
+
+ /* Ensure bit 2 of the stack pointer is clear. r2 holds the bit 2 value for
+ future use. _RB_ Does this ever actually need to be done provided the start
+ of the stack is 8-byte aligned? */
+ MOV r2, sp
+ AND r2, r2, #4
+ SUB sp, sp, r2
+
+ /* Call the interrupt handler. r4 pushed to maintain alignment. */
+ PUSH {r0-r4, lr}
+ LDR r1, vApplicationIRQHandlerConst
+ BLX r1
+ POP {r0-r4, lr}
+ ADD sp, sp, r2
+
+ CPSID i
+ DSB
+ ISB
+
+ /* Write the value read from ICCIAR to ICCEOIR. */
+ LDR r4, ulICCEOIRConst
+ LDR r4, [r4]
+ STR r0, [r4]
+
+ /* Restore the old nesting count. */
+ STR r1, [r3]
+
+ /* A context switch is never performed if the nesting count is not 0. */
+ CMP r1, #0
+ BNE exit_without_switch
+
+ /* Did the interrupt request a context switch? r1 holds the address of
+ ulPortYieldRequired and r0 the value of ulPortYieldRequired for future
+ use. */
+ LDR r1, =ulPortYieldRequired
+ LDR r0, [r1]
+ CMP r0, #0
+ BNE switch_before_exit
+
+exit_without_switch:
+ /* No context switch. Restore used registers, LR_irq and SPSR before
+ returning. */
+ POP {r0-r4, r12}
+ CPS #IRQ_MODE
+ POP {LR}
+ MSR SPSR_cxsf, LR
+ POP {LR}
+ MOVS PC, LR
+
+switch_before_exit:
+	/* A context switch is to be performed. Clear the context switch pending
+ flag. */
+ MOV r0, #0
+ STR r0, [r1]
+
+	/* Restore used registers, LR_irq and SPSR before saving the context
+ to the task stack. */
+ POP {r0-r4, r12}
+ CPS #IRQ_MODE
+ POP {LR}
+ MSR SPSR_cxsf, LR
+ POP {LR}
+ portSAVE_CONTEXT
+
+ /* Call the function that selects the new task to execute.
+ vTaskSwitchContext() if vTaskSwitchContext() uses LDRD or STRD
+ instructions, or 8 byte aligned stack allocated data. LR does not need
+ saving as a new LR will be loaded by portRESTORE_CONTEXT anyway. */
+ LDR R0, vTaskSwitchContextConst
+ BLX R0
+
+ /* Restore the context of, and branch to, the task selected to execute
+ next. */
+ portRESTORE_CONTEXT
+
+
+/******************************************************************************
+ * If the application provides an implementation of vApplicationIRQHandler(),
+ * then it will get called directly without saving the FPU registers on
+ * interrupt entry, and this weak implementation of
+ * vApplicationIRQHandler() will not get called.
+ *
+ * If the application provides its own implementation of
+ * vApplicationFPUSafeIRQHandler() then this implementation of
+ * vApplicationIRQHandler() will be called, save the FPU registers, and then
+ * call vApplicationFPUSafeIRQHandler().
+ *
+ * Therefore, if the application writer wants FPU registers to be saved on
+ * interrupt entry their IRQ handler must be called
+ * vApplicationFPUSafeIRQHandler(), and if the application writer does not want
+ * FPU registers to be saved on interrupt entry their IRQ handler must be
+ * called vApplicationIRQHandler().
+ *****************************************************************************/
+
+.align 4
+.weak vApplicationIRQHandler
+.type vApplicationIRQHandler, %function
+vApplicationIRQHandler:
+ PUSH {LR}
+ FMRX R1, FPSCR
+ VPUSH {D0-D15}
+ VPUSH {D16-D31}
+ PUSH {R1}
+
+ LDR r1, vApplicationFPUSafeIRQHandlerConst
+ BLX r1
+
+ POP {R0}
+ VPOP {D16-D31}
+ VPOP {D0-D15}
+ VMSR FPSCR, R0
+
+ POP {PC}
+
+
+ulICCIARConst: .word ulICCIAR
+ulICCEOIRConst: .word ulICCEOIR
+ulICCPMRConst: .word ulICCPMR
+pxCurrentTCBConst: .word pxCurrentTCB
+ulCriticalNestingConst: .word ulCriticalNesting
+ulPortTaskHasFPUContextConst: .word ulPortTaskHasFPUContext
+ulMaxAPIPriorityMaskConst: .word ulMaxAPIPriorityMask
+vTaskSwitchContextConst: .word vTaskSwitchContext
+vApplicationIRQHandlerConst: .word vApplicationIRQHandler
+ulPortInterruptNestingConst: .word ulPortInterruptNesting
+vApplicationFPUSafeIRQHandlerConst: .word vApplicationFPUSafeIRQHandler
+
+.end
diff --git a/Source/portable/GCC/ARM_CA9/portmacro.h b/Source/portable/GCC/ARM_CA9/portmacro.h
new file mode 100644
index 0000000..a88b3d8
--- /dev/null
+++ b/Source/portable/GCC/ARM_CA9/portmacro.h
@@ -0,0 +1,211 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*-----------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *-----------------------------------------------------------
+ */
+
+/* Type definitions. */
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
+
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
+
+typedef uint32_t TickType_t;
+#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+
+/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
+not need to be guarded with a critical section. */
+#define portTICK_TYPE_IS_ATOMIC 1
+
+/*-----------------------------------------------------------*/
+
+/* Hardware specifics. */
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+
+/*-----------------------------------------------------------*/
+
+/* Task utilities. */
+
+/* Called at the end of an ISR that can cause a context switch. */
+#define portEND_SWITCHING_ISR( xSwitchRequired )\
+{ \
+extern uint32_t ulPortYieldRequired; \
+ \
+ if( xSwitchRequired != pdFALSE ) \
+ { \
+ ulPortYieldRequired = pdTRUE; \
+ } \
+}
+
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+#define portYIELD() __asm volatile ( "SWI 0" ::: "memory" );
+
+
+/*-----------------------------------------------------------
+ * Critical section control
+ *----------------------------------------------------------*/
+
+extern void vPortEnterCritical( void );
+extern void vPortExitCritical( void );
+extern uint32_t ulPortSetInterruptMask( void );
+extern void vPortClearInterruptMask( uint32_t ulNewMaskValue );
+extern void vPortInstallFreeRTOSVectorTable( void );
+
+/* These macros do not globally disable/enable interrupts. They do mask off
+interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. */
+#define portENTER_CRITICAL() vPortEnterCritical();
+#define portEXIT_CRITICAL() vPortExitCritical();
+#define portDISABLE_INTERRUPTS() ulPortSetInterruptMask()
+#define portENABLE_INTERRUPTS() vPortClearInterruptMask( 0 )
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vPortClearInterruptMask(x)
+
+/*-----------------------------------------------------------*/
+
+/* Task function macros as described on the FreeRTOS.org WEB site. These are
+not required for this port but included in case common demo code that uses these
+macros is used. */
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
+
+/* Prototype of the FreeRTOS tick handler. This must be installed as the
+handler for whichever peripheral is used to generate the RTOS tick. */
+void FreeRTOS_Tick_Handler( void );
+
+/* If configUSE_TASK_FPU_SUPPORT is set to 1 (or left undefined) then tasks are
+created without an FPU context and must call vPortTaskUsesFPU() to give
+themselves an FPU context before using any FPU instructions. If
+configUSE_TASK_FPU_SUPPORT is set to 2 then all tasks will have an FPU context
+by default. */
+#if( configUSE_TASK_FPU_SUPPORT != 2 )
+ void vPortTaskUsesFPU( void );
+#else
+ /* Each task has an FPU context already, so define this function away to
+ nothing to prevent it being called accidentally. */
+ #define vPortTaskUsesFPU()
+#endif
+#define portTASK_USES_FLOATING_POINT() vPortTaskUsesFPU()
+
+#define portLOWEST_INTERRUPT_PRIORITY ( ( ( uint32_t ) configUNIQUE_INTERRUPT_PRIORITIES ) - 1UL )
+#define portLOWEST_USABLE_INTERRUPT_PRIORITY ( portLOWEST_INTERRUPT_PRIORITY - 1UL )
+
+/* Architecture specific optimisations. */
+#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
+ #define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
+#endif
+
+#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
+
+ /* Store/clear the ready priorities in a bit map. */
+ #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
+ #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
+
+ /*-----------------------------------------------------------*/
+
+ #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31UL - ( uint32_t ) __builtin_clz( uxReadyPriorities ) )
+
+#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
+
+#ifdef configASSERT
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+#endif /* configASSERT */
+
+#define portNOP() __asm volatile( "NOP" )
+#define portINLINE __inline
+
+/* The number of bits to shift for an interrupt priority is dependent on the
+number of bits implemented by the interrupt controller. */
+#if configUNIQUE_INTERRUPT_PRIORITIES == 16
+ #define portPRIORITY_SHIFT 4
+ #define portMAX_BINARY_POINT_VALUE 3
+#elif configUNIQUE_INTERRUPT_PRIORITIES == 32
+ #define portPRIORITY_SHIFT 3
+ #define portMAX_BINARY_POINT_VALUE 2
+#elif configUNIQUE_INTERRUPT_PRIORITIES == 64
+ #define portPRIORITY_SHIFT 2
+ #define portMAX_BINARY_POINT_VALUE 1
+#elif configUNIQUE_INTERRUPT_PRIORITIES == 128
+ #define portPRIORITY_SHIFT 1
+ #define portMAX_BINARY_POINT_VALUE 0
+#elif configUNIQUE_INTERRUPT_PRIORITIES == 256
+ #define portPRIORITY_SHIFT 0
+ #define portMAX_BINARY_POINT_VALUE 0
+#else
+ #error Invalid configUNIQUE_INTERRUPT_PRIORITIES setting. configUNIQUE_INTERRUPT_PRIORITIES must be set to the number of unique priorities implemented by the target hardware
+#endif
+
+/* Interrupt controller access addresses. */
+#define portICCPMR_PRIORITY_MASK_OFFSET ( 0x04 )
+#define portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET ( 0x0C )
+#define portICCEOIR_END_OF_INTERRUPT_OFFSET ( 0x10 )
+#define portICCBPR_BINARY_POINT_OFFSET ( 0x08 )
+#define portICCRPR_RUNNING_PRIORITY_OFFSET ( 0x14 )
+
+#define portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET )
+#define portICCPMR_PRIORITY_MASK_REGISTER ( *( ( volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET ) ) )
+#define portICCIAR_INTERRUPT_ACKNOWLEDGE_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET )
+#define portICCEOIR_END_OF_INTERRUPT_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCEOIR_END_OF_INTERRUPT_OFFSET )
+#define portICCPMR_PRIORITY_MASK_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET )
+#define portICCBPR_BINARY_POINT_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCBPR_BINARY_POINT_OFFSET ) ) )
+#define portICCRPR_RUNNING_PRIORITY_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCRPR_RUNNING_PRIORITY_OFFSET ) ) )
+
+#define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" )
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM0/port.c b/Source/portable/GCC/ARM_CM0/port.c
index 3a93cea..063a33e 100644
--- a/Source/portable/GCC/ARM_CM0/port.c
+++ b/Source/portable/GCC/ARM_CM0/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -204,24 +204,24 @@
* table offset register that can be used to locate the initial stack value.
* Not all M0 parts have the application vector table at address 0. */
__asm volatile (
- " .syntax unified \n"
- " ldr r2, pxCurrentTCBConst2 \n"/* Obtain location of pxCurrentTCB. */
- " ldr r3, [r2] \n"
- " ldr r0, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " movs r0, #2 \n"/* Switch to the psp stack. */
- " msr CONTROL, r0 \n"
- " isb \n"
- " pop {r0-r5} \n"/* Pop the registers that are saved automatically. */
- " mov lr, r5 \n"/* lr is now in r5. */
- " pop {r3} \n"/* Return address is now in r3. */
- " pop {r2} \n"/* Pop and discard XPSR. */
- " cpsie i \n"/* The first task has its context and interrupts can be enabled. */
- " bx r3 \n"/* Finally, jump to the user defined task code. */
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst2: .word pxCurrentTCB "
+ " .syntax unified \n"
+ " ldr r2, pxCurrentTCBConst2 \n"/* Obtain location of pxCurrentTCB. */
+ " ldr r3, [r2] \n"
+ " ldr r0, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */
+ " adds r0, #32 \n"/* Discard everything up to r0. */
+ " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
+ " movs r0, #2 \n"/* Switch to the psp stack. */
+ " msr CONTROL, r0 \n"
+ " isb \n"
+ " pop {r0-r5} \n"/* Pop the registers that are saved automatically. */
+ " mov lr, r5 \n"/* lr is now in r5. */
+ " pop {r3} \n"/* Return address is now in r3. */
+ " pop {r2} \n"/* Pop and discard XPSR. */
+ " cpsie i \n"/* The first task has its context and interrupts can be enabled. */
+ " bx r3 \n"/* Finally, jump to the user defined task code. */
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst2: .word pxCurrentTCB "
);
}
/*-----------------------------------------------------------*/
@@ -303,9 +303,9 @@
uint32_t ulSetInterruptMaskFromISR( void )
{
__asm volatile (
- " mrs r0, PRIMASK \n"
- " cpsid i \n"
- " bx lr "
+ " mrs r0, PRIMASK \n"
+ " cpsid i \n"
+ " bx lr "
::: "memory"
);
}
@@ -314,8 +314,8 @@
void vClearInterruptMaskFromISR( __attribute__( ( unused ) ) uint32_t ulMask )
{
__asm volatile (
- " msr PRIMASK, r0 \n"
- " bx lr "
+ " msr PRIMASK, r0 \n"
+ " bx lr "
::: "memory"
);
}
@@ -327,45 +327,45 @@
__asm volatile
(
- " .syntax unified \n"
- " mrs r0, psp \n"
- " \n"
- " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */
- " ldr r2, [r3] \n"
- " \n"
- " subs r0, r0, #32 \n"/* Make space for the remaining low registers. */
- " str r0, [r2] \n"/* Save the new top of stack. */
- " stmia r0!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */
- " mov r4, r8 \n"/* Store the high registers. */
- " mov r5, r9 \n"
- " mov r6, r10 \n"
- " mov r7, r11 \n"
- " stmia r0!, {r4-r7} \n"
- " \n"
- " push {r3, r14} \n"
- " cpsid i \n"
- " bl vTaskSwitchContext \n"
- " cpsie i \n"
- " pop {r2, r3} \n"/* lr goes in r3. r2 now holds tcb pointer. */
- " \n"
- " ldr r1, [r2] \n"
- " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */
- " adds r0, r0, #16 \n"/* Move to the high registers. */
- " ldmia r0!, {r4-r7} \n"/* Pop the high registers. */
- " mov r8, r4 \n"
- " mov r9, r5 \n"
- " mov r10, r6 \n"
- " mov r11, r7 \n"
- " \n"
- " msr psp, r0 \n"/* Remember the new top of stack for the task. */
- " \n"
- " subs r0, r0, #32 \n"/* Go back for the low registers that are not automatically restored. */
- " ldmia r0!, {r4-r7} \n"/* Pop low registers. */
- " \n"
- " bx r3 \n"
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst: .word pxCurrentTCB "
+ " .syntax unified \n"
+ " mrs r0, psp \n"
+ " \n"
+ " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */
+ " ldr r2, [r3] \n"
+ " \n"
+ " subs r0, r0, #32 \n"/* Make space for the remaining low registers. */
+ " str r0, [r2] \n"/* Save the new top of stack. */
+ " stmia r0!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */
+ " mov r4, r8 \n"/* Store the high registers. */
+ " mov r5, r9 \n"
+ " mov r6, r10 \n"
+ " mov r7, r11 \n"
+ " stmia r0!, {r4-r7} \n"
+ " \n"
+ " push {r3, r14} \n"
+ " cpsid i \n"
+ " bl vTaskSwitchContext \n"
+ " cpsie i \n"
+ " pop {r2, r3} \n"/* lr goes in r3. r2 now holds tcb pointer. */
+ " \n"
+ " ldr r1, [r2] \n"
+ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */
+ " adds r0, r0, #16 \n"/* Move to the high registers. */
+ " ldmia r0!, {r4-r7} \n"/* Pop the high registers. */
+ " mov r8, r4 \n"
+ " mov r9, r5 \n"
+ " mov r10, r6 \n"
+ " mov r11, r7 \n"
+ " \n"
+ " msr psp, r0 \n"/* Remember the new top of stack for the task. */
+ " \n"
+ " subs r0, r0, #32 \n"/* Go back for the low registers that are not automatically restored. */
+ " ldmia r0!, {r4-r7} \n"/* Pop low registers. */
+ " \n"
+ " bx r3 \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB "
);
}
/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM0/portmacro.h b/Source/portable/GCC/ARM_CM0/portmacro.h
index c56d47e..46f308d 100644
--- a/Source/portable/GCC/ARM_CM0/portmacro.h
+++ b/Source/portable/GCC/ARM_CM0/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -28,11 +28,13 @@
#ifndef PORTMACRO_H
- #define PORTMACRO_H
+#define PORTMACRO_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*-----------------------------------------------------------
* Port specific definitions.
@@ -45,81 +47,118 @@
*/
/* Type definitions. */
- #define portCHAR char
- #define portFLOAT float
- #define portDOUBLE double
- #define portLONG long
- #define portSHORT short
- #define portSTACK_TYPE uint32_t
- #define portBASE_TYPE long
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
- typedef portSTACK_TYPE StackType_t;
- typedef long BaseType_t;
- typedef unsigned long UBaseType_t;
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
- typedef uint16_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffff
- #else
- typedef uint32_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
- #define portTICK_TYPE_IS_ATOMIC 1
- #endif
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
/*-----------------------------------------------------------*/
/* Architecture specifics. */
- #define portSTACK_GROWTH ( -1 )
- #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
- #define portBYTE_ALIGNMENT 8
- #define portDONT_DISCARD __attribute__( ( used ) )
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portDONT_DISCARD __attribute__( ( used ) )
/*-----------------------------------------------------------*/
/* Scheduler utilities. */
- extern void vPortYield( void );
- #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
- #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
- #define portYIELD() vPortYield()
- #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 )
- #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+extern void vPortYield( void );
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portYIELD() vPortYield()
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/* Critical section management. */
- extern void vPortEnterCritical( void );
- extern void vPortExitCritical( void );
- extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) );
- extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) );
+extern void vPortEnterCritical( void );
+extern void vPortExitCritical( void );
+extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) );
+extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) );
- #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
- #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x )
- #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
- #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
- #define portENTER_CRITICAL() vPortEnterCritical()
- #define portEXIT_CRITICAL() vPortExitCritical()
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x )
+#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
+#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/* Tickless idle/low power functionality. */
- #ifndef portSUPPRESS_TICKS_AND_SLEEP
- extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
- #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
- #endif
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. */
- #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portNOP()
+#define portNOP()
- #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
- #ifdef __cplusplus
- }
- #endif
+
+#define portINLINE __inline
+
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+
+/*-----------------------------------------------------------*/
+
+portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void )
+{
+ uint32_t ulCurrentInterrupt;
+ BaseType_t xReturn;
+
+ /* Obtain the number of the currently executing interrupt. */
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+ if( ulCurrentInterrupt == 0 )
+ {
+ xReturn = pdFALSE;
+ }
+ else
+ {
+ xReturn = pdTRUE;
+ }
+
+ return xReturn;
+}
+
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c b/Source/portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..b8164a0
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2176 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+#include "mpu_syscall_numbers.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskDelayUntil ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskAbortDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskPriorityGet ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_eTaskGetState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskGetInfo ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetIdleTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSuspend ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskResume ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ TickType_t MPU_xTaskGetTickCount( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetTickCount ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetNumberOfTasks ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTaskGetName ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimeCounter ) : "memory"
+ );
+ }
+
+    #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimePercent ) : "memory"
+ );
+ }
+
+    #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetSystemState ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark2 ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetCurrentTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetSchedulerState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetTimeOutState ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskCheckForTimeOut ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotify ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyWait ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyTake ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyStateClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyValueClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGenericSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueMessagesWaiting ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueuePeek ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSemaphoreTake ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGetMutexHolder ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueTakeMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGiveMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSelectFromSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueAddToSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueAddToRegistry ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueUnregisterQueue ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcQueueGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTimerGetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerIsTimerActive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGenericCommand ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTimerGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetPeriod ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetExpiryTime ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupWaitBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupClearBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSetBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSync ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxEventGroupGetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vEventGroupSetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsFull ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsEmpty ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferBytesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSetTriggerLevel ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferNextMessageLengthBytes ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/Source/portable/GCC/ARM_CM23/non_secure/port.c b/Source/portable/GCC/ARM_CM23/non_secure/port.c
index 349aeff..9712ac3 100644
--- a/Source/portable/GCC/ARM_CM23/non_secure/port.c
+++ b/Source/portable/GCC/ARM_CM23/non_secure/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
-/* MPU wrappers includes. */
+/* MPU includes. */
#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@@ -95,6 +96,26 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -111,6 +132,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -135,6 +164,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -178,6 +209,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -299,6 +354,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -352,8 +420,67 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
/**
* @brief Each task maintains its own interrupt status in the critical nesting
* variable.
@@ -369,6 +496,19 @@
PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
#endif /* configENABLE_TRUSTZONE */
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
#if ( configUSE_TICKLESS_IDLE == 1 )
/**
@@ -656,10 +796,29 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@@ -827,9 +986,8 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@@ -839,7 +997,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -854,7 +1012,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -925,18 +1083,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -944,131 +1102,546 @@
}
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of being
+             * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                        " vpop {s0}         \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Store the value of the PSPLIM register before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of being
+             * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the privileged code (i.e. application is not
+ * raising SVC directly). This SVC is only raised from
+ * vRequestSystemCallExit which is in the privileged code section.
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+ * this means that we previously entered a system call and the
+ * application is not attempting to exit without entering a system
+ * call.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+ {
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                        " vpop {s0}         \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Return to the caller of the System Call entry point (i.e. the
+ * caller of the MPU_<API>). */
+ pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+            /* Ensure that LR has a valid value. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* Restore the PSPLIM register to what it was at the time of
+ * system call entry. */
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
+
+ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+ }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+ }
+
+#else /* configENABLE_MPU */
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
+ {
+ /* Simulate the stack frame as it would be created by a context switch
+ * interrupt. */
+ #if ( portPRELOAD_REGISTERS == 0 )
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
+ *pxTopOfStack = portINITIAL_EXC_RETURN;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #else /* portPRELOAD_REGISTERS */
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+ pxTopOfStack--;
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #endif /* portPRELOAD_REGISTERS */
+
+ return pxTopOfStack;
+ }
+
#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
-{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- #if ( portPRELOAD_REGISTERS == 0 )
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #else /* portPRELOAD_REGISTERS */
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #endif /* portPRELOAD_REGISTERS */
-
- return pxTopOfStack;
-}
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
{
+ #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+ {
+ volatile uint32_t ulOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine the maximum priority from which ISR safe FreeRTOS API
+ * functions can be called. ISR safe functions are those that end in
+ * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+ * ensure interrupt entry is as fast and simple as possible.
+ *
+ * Save the interrupt priority value that is about to be clobbered. */
+ ulOriginalPriority = portNVIC_SHPR2_REG;
+
+ /* Determine the number of priority bits available. First write to all
+ * possible bits. */
+ portNVIC_SHPR2_REG = 0xFF000000;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+ /* Use the same mask on the maximum system call priority. */
+ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+ /* Calculate the maximum acceptable priority group value for the number
+ * of bits read back. */
+
+ while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+ {
+ ulImplementedPrioBits++;
+ ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+ }
+
+ if( ulImplementedPrioBits == 8 )
+ {
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
+ }
+ else
+ {
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+ }
+
+ /* Shift the priority group value back to its position within the AIRCR
+ * register. */
+ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+ ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+ /* Restore the clobbered interrupt priority register to its original
+ * value. */
+ portNVIC_SHPR2_REG = ulOriginalPriority;
+ }
+ #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
/* Make PendSV, CallSV and SysTick the same priority as the kernel. */
portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
@@ -1087,6 +1660,12 @@
/* Initialize the critical nesting count ready for the first task. */
ulCriticalNesting = 0;
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+ {
+ xSchedulerRunning = pdTRUE;
+ }
+ #endif
+
/* Start the first task. */
vStartFirstTask();
@@ -1122,7 +1701,6 @@
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;
@@ -1237,6 +1815,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
@@ -1259,3 +1885,159 @@
return xReturn;
}
/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ void vPortValidateInterruptPriority( void )
+ {
+ uint32_t ulCurrentInterrupt;
+ uint8_t ucCurrentPriority;
+
+ /* Obtain the number of the currently executing interrupt. */
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+ /* Is the interrupt number a user defined interrupt? */
+ if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+ {
+ /* Look up the interrupt's priority. */
+ ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+ /* The following assertion will fail if a service routine (ISR) for
+ * an interrupt that has been assigned a priority above
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+ * function. ISR safe FreeRTOS API functions must *only* be called
+ * from interrupts that have been assigned a priority at or below
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Numerically low interrupt priority numbers represent logically high
+ * interrupt priorities, therefore the priority of the interrupt must
+ * be set to a value equal to or numerically *higher* than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
+ * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+ * and therefore also guaranteed to be invalid.
+ *
+ * FreeRTOS maintains separate thread and ISR API functions to ensure
+ * interrupt entry is as fast and simple as possible.
+ *
+ * The following links provide detailed information:
+ * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+ * https://www.FreeRTOS.org/FAQHelp.html */
+ configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+ }
+
+ /* Priority grouping: The interrupt controller (NVIC) allows the bits
+ * that define each interrupt's priority to be split between bits that
+ * define the interrupt's pre-emption priority bits and bits that define
+ * the interrupt's sub-priority. For simplicity all bits must be defined
+ * to be pre-emption priority bits. The following assertion will fail if
+ * this is not the case (if some bits represent a sub-priority).
+ *
+ * If the application only uses CMSIS libraries for interrupt
+ * configuration then the correct setting can be achieved on all Cortex-M
+ * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+ * scheduler. Note however that some vendor specific peripheral libraries
+ * assume a non-zero priority group setting, in which cases using a value
+ * of zero will result in unpredictable behaviour. */
+ configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+ }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings;
+
+ if( xSchedulerRunning == pdFALSE )
+ {
+ /* Grant access to all the kernel objects before the scheduler
+ * is started. It is necessary because there is no task running
+ * yet and therefore, we cannot use the permissions of any
+ * task. */
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+
+ #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ ( void ) lInternalIndexOfKernelObject;
+
+ /* If Access Control List feature is not used, all the tasks have
+ * access to all the kernel objects. */
+ return pdTRUE;
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM23/non_secure/portasm.c b/Source/portable/GCC/ARM_CM23/non_secure/portasm.c
index c3a9782..b23defe 100644
--- a/Source/portable/GCC/ARM_CM23/non_secure/portasm.c
+++ b/Source/portable/GCC/ARM_CM23/non_secure/portasm.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -36,6 +36,9 @@
/* Portasm includes. */
#include "portasm.h"
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
+
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
@@ -44,112 +47,156 @@
#error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
#endif
-void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
-{
- __asm volatile
- (
- " .syntax unified \n"
- " \n"
- " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r3, [r2] \n"/* Read pxCurrentTCB. */
- " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r5, #1 \n"/* r5 = 1. */
- " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- " movs r5, #4 \n"/* r5 = 4. */
- " str r5, [r2] \n"/* Program RNR = 4. */
- " ldmia r3!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */
- " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */
- " stmia r4!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */
- " movs r5, #5 \n"/* r5 = 5. */
- " str r5, [r2] \n"/* Program RNR = 5. */
- " ldmia r3!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */
- " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */
- " stmia r4!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */
- " movs r5, #6 \n"/* r5 = 6. */
- " str r5, [r2] \n"/* Program RNR = 6. */
- " ldmia r3!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. */
- " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */
- " stmia r4!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */
- " movs r5, #7 \n"/* r5 = 7. */
- " str r5, [r2] \n"/* Program RNR = 7. */
- " ldmia r3!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */
- " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */
- " stmia r4!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r5, #1 \n"/* r5 = 1. */
- " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- " ldr r5, xSecureContextConst2 \n"
- " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " msr control, r3 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " bx r4 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
- " ldr r4, xSecureContextConst2 \n"
- " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst2: .word pxCurrentTCB \n"
- "xSecureContextConst2: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
- );
-}
+#if ( configENABLE_MPU == 1 )
+
+ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */
+ " movs r3, #5 \n" /* r3 = 5. */
+ " str r3, [r1] \n" /* Program RNR = 5. */
+ " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */
+ " movs r3, #6 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 6. */
+ " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */
+            "    movs r3, #7                                 \n" /* r3 = 7. */
+ " str r3, [r1] \n" /* Program RNR = 7. */
+ " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " subs r2, #20 \n"
+ " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */
+ " subs r2, #20 \n"
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " mov lr, r6 \n"
+ " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " subs r2, #32 \n"
+ " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */
+            "    stmia r3!, {r4-r7}                          \n" /* Copy half of the hardware saved context on the task stack. */
+            "    ldmia r2!, {r4-r7}                          \n" /* r4-r7 contain rest half of the hardware saved context. */
+            "    stmia r3!, {r4-r7}                          \n" /* Copy rest half of the hardware saved context on the task stack. */
+ " subs r2, #48 \n"
+ " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " subs r2, #32 \n"
+ " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */
+ " subs r2, #16 \n"
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xSecureContextConst2: .word xSecureContext \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+ }
+
+#else /* configENABLE_MPU */
+
+ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r3, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ " \n"
+ " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ " ldr r4, xSecureContextConst2 \n"
+ " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */
+ " msr psplim, r2 \n" /* Set this task's PSPLIM value. */
+ " movs r1, #2 \n" /* r1 = 2. */
+ " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n" /* Discard everything up to r0. */
+ " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " bx r3 \n" /* Finally, branch to EXC_RETURN. */
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ "xSecureContextConst2: .word xSecureContext \n"
+ );
+ }
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, control \n"/* r0 = CONTROL. */
- " movs r1, #1 \n"/* r1 = 1. */
- " tst r0, r1 \n"/* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
- " beq running_privileged \n"/* If the result of previous AND operation was 0, branch. */
- " movs r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
- " bx lr \n"/* Return. */
- " running_privileged: \n"
- " movs r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
- " bx lr \n"/* Return. */
- " \n"
- " .align 4 \n"
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
+ " beq running_privileged \n" /* If the result of previous AND operation was 0, branch. */
+ " movs r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ " bx lr \n" /* Return. */
+ " running_privileged: \n"
+ " movs r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ " bx lr \n" /* Return. */
+ " \n"
+ " .align 4 \n"
::: "r0", "r1", "memory"
);
}
@@ -159,13 +206,13 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, control \n"/* Read the CONTROL register. */
- " movs r1, #1 \n"/* r1 = 1. */
- " bics r0, r1 \n"/* Clear the bit 0. */
- " msr control, r0 \n"/* Write back the new CONTROL value. */
- " bx lr \n"/* Return to the caller. */
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* Read the CONTROL register. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear the bit 0. */
+ " msr control, r0 \n" /* Write back the new CONTROL value. */
+ " bx lr \n" /* Return to the caller. */
::: "r0", "r1", "memory"
);
}
@@ -175,13 +222,13 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, control \n"/* r0 = CONTROL. */
- " movs r1, #1 \n"/* r1 = 1. */
- " orrs r0, r1 \n"/* r0 = r0 | r1. */
- " msr control, r0 \n"/* CONTROL = r0. */
- " bx lr \n"/* Return to the caller. */
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* r0 = r0 | r1. */
+ " msr control, r0 \n" /* CONTROL = r0. */
+ " bx lr \n" /* Return to the caller. */
::: "r0", "r1", "memory"
);
}
@@ -191,20 +238,20 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
- " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
- " msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
- " cpsie i \n"/* Globally enable interrupts. */
- " dsb \n"
- " isb \n"
- " svc %0 \n"/* System call to start the first task. */
- " nop \n"
- " \n"
- " .align 4 \n"
- "xVTORConst: .word 0xe000ed08 \n"
+ " .syntax unified \n"
+ " \n"
+ " ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
+ " ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
+ " msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
+ " cpsie i \n" /* Globally enable interrupts. */
+ " dsb \n"
+ " isb \n"
+ " svc %0 \n" /* System call to start the first task. */
+ " nop \n"
+ " \n"
+ " .align 4 \n"
+ "xVTORConst: .word 0xe000ed08 \n"
::"i" ( portSVC_START_SCHEDULER ) : "memory"
);
}
@@ -214,11 +261,11 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, PRIMASK \n"
- " cpsid i \n"
- " bx lr \n"
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, PRIMASK \n"
+ " cpsid i \n"
+ " bx lr \n"
::: "memory"
);
}
@@ -228,231 +275,346 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " msr PRIMASK, r0 \n"
- " bx lr \n"
+ " .syntax unified \n"
+ " \n"
+ " msr PRIMASK, r0 \n"
+ " bx lr \n"
::: "memory"
);
}
/*-----------------------------------------------------------*/
-void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
-{
- __asm volatile
- (
- " .syntax unified \n"
- " .extern SecureContext_SaveContext \n"
- " .extern SecureContext_LoadContext \n"
- " \n"
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/
- " mrs r2, psp \n"/* Read PSP in r2. */
- " \n"
- " cbz r0, save_ns_context \n"/* No secure context to save. */
- " push {r0-r2, r14} \n"
- " bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r0-r3} \n"/* LR is now in r3. */
- " mov lr, r3 \n"/* LR = r3. */
- " lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
- " b select_next_task \n"
- " \n"
- " save_ns_context: \n"
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #16 \n"/* r2 = r2 + 16. */
- " stmia r2!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */
- " mov r4, r8 \n"/* r4 = r8. */
- " mov r5, r9 \n"/* r5 = r9. */
- " mov r6, r10 \n"/* r6 = r10. */
- " mov r7, r11 \n"/* r7 = r11. */
- " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " subs r2, r2, #48 \n"/* r2 = r2 - 48. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
- " mov r4, r8 \n"/* r4 = r8. */
- " mov r5, r9 \n"/* r5 = r9. */
- " mov r6, r10 \n"/* r6 = r10. */
- " mov r7, r11 \n"/* r7 = r11. */
- " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
- #endif /* configENABLE_MPU */
- " \n"
- " select_next_task: \n"
- " cpsid i \n"
- " bl vTaskSwitchContext \n"
- " cpsie i \n"
- " \n"
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " movs r5, #1 \n"/* r5 = 1. */
- " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
- " str r4, [r3] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r3] \n"/* Program MAIR0. */
- " ldr r4, xRNRConst \n"/* r4 = 0xe000ed98 [Location of RNR]. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " movs r5, #4 \n"/* r5 = 4. */
- " str r5, [r4] \n"/* Program RNR = 4. */
- " ldmia r1!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */
- " movs r5, #5 \n"/* r5 = 5. */
- " str r5, [r4] \n"/* Program RNR = 5. */
- " ldmia r1!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */
- " movs r5, #6 \n"/* r5 = 6. */
- " str r5, [r4] \n"/* Program RNR = 6. */
- " ldmia r1!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */
- " movs r5, #7 \n"/* r5 = 7. */
- " str r5, [r4] \n"/* Program RNR = 7. */
- " ldmia r1!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */
- " \n"
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " movs r5, #1 \n"/* r5 = 1. */
- " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
- " str r4, [r3] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r3 \n"/* Restore the CONTROL register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #else /* configENABLE_MPU */
- " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #endif /* configENABLE_MPU */
- " \n"
- " restore_ns_context: \n"
- " adds r2, r2, #16 \n"/* Move to the high registers. */
- " ldmia r2!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
- " mov r8, r4 \n"/* r8 = r4. */
- " mov r9, r5 \n"/* r9 = r5. */
- " mov r10, r6 \n"/* r10 = r6. */
- " mov r11, r7 \n"/* r11 = r7. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " subs r2, r2, #32 \n"/* Go back to the low registers. */
- " ldmia r2!, {r4-r7} \n"/* Restore the low registers that are not automatically restored. */
- " bx lr \n"
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst: .word pxCurrentTCB \n"
- "xSecureContextConst: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
- );
-}
+#if ( configENABLE_MPU == 1 )
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/
+ " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " save_s_context: \n"
+ " push {r0-r2, lr} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r3} \n" /* LR is now in r3. */
+ " mov lr, r3 \n" /* Restore LR. */
+ " \n"
+ " save_ns_context: \n"
+ " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */
+ " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " save_general_regs: \n"
+ " mrs r3, psp \n"
+ " stmia r2!, {r4-r7} \n" /* Store r4-r7. */
+ " mov r4, r8 \n" /* r4 = r8. */
+ " mov r5, r9 \n" /* r5 = r9. */
+ " mov r6, r10 \n" /* r6 = r10. */
+ " mov r7, r11 \n" /* r7 = r11. */
+ " stmia r2!, {r4-r7} \n" /* Store r8-r11. */
+ " ldmia r3!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */
+ " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */
+ " ldmia r3!, {r4-r7} \n" /* Copy rest half of the hardware saved context into r4-r7. */
+ " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psp \n" /* r3 = PSP. */
+ " mrs r4, psplim \n" /* r4 = PSPLIM. */
+ " mrs r5, control \n" /* r5 = CONTROL. */
+ " mov r6, lr \n" /* r6 = LR. */
+ " stmia r2!, {r0, r3-r6} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " cpsid i \n"
+ " bl vTaskSwitchContext \n"
+ " cpsie i \n"
+ " \n"
+ " program_mpu: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */
+ " movs r3, #5 \n" /* r3 = 5. */
+ " str r3, [r1] \n" /* Program RNR = 5. */
+ " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */
+ " movs r3, #6 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 6. */
+ " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */
+ " movs r3, #7 \n" /* r3 = 7. */
+ " str r3, [r1] \n" /* Program RNR = 7. */
+ " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " subs r2, #20 \n"
+ " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */
+ " subs r2, #20 \n"
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " mov lr, r6 \n"
+ " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* No secure context to restore. */
+ " \n"
+ " restore_s_context: \n"
+ " push {r1-r3, lr} \n"
+ " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r1-r4} \n" /* LR is now in r4. */
+ " mov lr, r4 \n"
+ " \n"
+ " restore_ns_context: \n"
+ " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */
+ " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " restore_general_regs: \n"
+ " subs r2, #32 \n"
+ " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */
+ " stmia r3!, {r4-r7} \n" /* Copy half of the hardware saved context on the task stack. */
+ " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */
+ " stmia r3!, {r4-r7} \n" /* Copy rest half of the hardware saved context on the task stack. */
+ " subs r2, #48 \n"
+ " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " subs r2, #32 \n"
+ " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */
+ " subs r2, #16 \n"
+ " \n"
+ " restore_context_done: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xSecureContextConst: .word xSecureContext \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ );
+ }
+
+#else /* configENABLE_MPU */
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/
+ " mrs r2, psp \n" /* Read PSP in r2. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " push {r0-r2, r14} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r3} \n" /* LR is now in r3. */
+ " mov lr, r3 \n" /* LR = r3. */
+ " lsls r1, r3, #25 \n" /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl save_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ " subs r2, r2, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ " str r2, [r1] \n" /* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n" /* r1 = PSPLIM. */
+ " mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
+ " b select_next_task \n"
+ " \n"
+ " save_ns_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ " subs r2, r2, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ " str r2, [r1] \n" /* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n" /* r1 = PSPLIM. */
+ " mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3-r7} \n" /* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
+ " mov r4, r8 \n" /* r4 = r8. */
+ " mov r5, r9 \n" /* r5 = r9. */
+ " mov r6, r10 \n" /* r6 = r10. */
+ " mov r7, r11 \n" /* r7 = r11. */
+ " stmia r2!, {r4-r7} \n" /* Store the high registers that are not saved automatically. */
+ " \n"
+ " select_next_task: \n"
+ " cpsid i \n"
+ " bl vTaskSwitchContext \n"
+ " cpsie i \n"
+ " \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ " ldr r2, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
+ " \n"
+ " ldmia r2!, {r0, r1, r4} \n" /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ " msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
+ " mov lr, r4 \n" /* LR = r4. */
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r3] \n" /* Restore the task's xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ " push {r2, r4} \n"
+ " bl SecureContext_LoadContext \n" /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r2, r4} \n"
+ " mov lr, r4 \n" /* LR = r4. */
+ " lsls r1, r4, #25 \n" /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " msr psp, r2 \n" /* Remember the new top of stack for the task. */
+ " bx lr \n"
+ " \n"
+ " restore_ns_context: \n"
+ " adds r2, r2, #16 \n" /* Move to the high registers. */
+ " ldmia r2!, {r4-r7} \n" /* Restore the high registers that are not automatically restored. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " msr psp, r2 \n" /* Remember the new top of stack for the task. */
+ " subs r2, r2, #32 \n" /* Go back to the low registers. */
+ " ldmia r2!, {r4-r7} \n" /* Restore the low registers that are not automatically restored. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ "xSecureContextConst: .word xSecureContext \n"
+ );
+ }
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
-void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
-{
- __asm volatile
- (
- " .syntax unified \n"
- " \n"
- " movs r0, #4 \n"
- " mov r1, lr \n"
- " tst r0, r1 \n"
- " beq stacking_used_msp \n"
- " mrs r0, psp \n"
- " ldr r2, svchandler_address_const \n"
- " bx r2 \n"
- " stacking_used_msp: \n"
- " mrs r0, msp \n"
- " ldr r2, svchandler_address_const \n"
- " bx r2 \n"
- " \n"
- " .align 4 \n"
- "svchandler_address_const: .word vPortSVCHandler_C \n"
- );
-}
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "movs r0, #4 \n"
+ "mov r1, lr \n"
+ "tst r0, r1 \n"
+ "beq stack_on_msp \n"
+ "stack_on_psp: \n"
+ " mrs r0, psp \n"
+ " b route_svc \n"
+ "stack_on_msp: \n"
+ " mrs r0, msp \n"
+ " b route_svc \n"
+ " \n"
+ "route_svc: \n"
+ " ldr r3, [r0, #24] \n"
+ " subs r3, #2 \n"
+ " ldrb r2, [r3, #0] \n"
+ " cmp r2, %0 \n"
+ " blt system_call_enter \n"
+ " cmp r2, %1 \n"
+ " beq system_call_exit \n"
+ " b vPortSVCHandler_C \n"
+ " \n"
+ "system_call_enter: \n"
+ " b vSystemCallEnter \n"
+ "system_call_exit: \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ : "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "r3", "memory"
+ );
+ }
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " movs r0, #4 \n"
+ " mov r1, lr \n"
+ " tst r0, r1 \n"
+ " beq stacking_used_msp \n"
+ " mrs r0, psp \n"
+ " ldr r2, svchandler_address_const \n"
+ " bx r2 \n"
+ " stacking_used_msp: \n"
+ " mrs r0, msp \n"
+ " ldr r2, svchandler_address_const \n"
+ " bx r2 \n"
+ " \n"
+ " .align 4 \n"
+ "svchandler_address_const: .word vPortSVCHandler_C \n"
+ );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " svc %0 \n"/* Secure context is allocated in the supervisor call. */
- " bx lr \n"/* Return. */
+ " .syntax unified \n"
+ " \n"
+ " svc %0 \n" /* Secure context is allocated in the supervisor call. */
+ " bx lr \n" /* Return. */
::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
);
}
@@ -462,16 +624,16 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */
- " ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */
- " cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */
- " bne free_secure_context \n"/* Branch if r1 != 0. */
- " bx lr \n"/* There is no secure context (xSecureContext is NULL). */
- " free_secure_context: \n"
- " svc %0 \n"/* Secure context is freed in the supervisor call. */
- " bx lr \n"/* Return. */
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, [r0] \n" /* The first item in the TCB is the top of the stack. */
+ " ldr r1, [r2] \n" /* The first item on the stack is the task's xSecureContext. */
+ " cmp r1, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
+ " bne free_secure_context \n" /* Branch if r1 != 0. */
+ " bx lr \n" /* There is no secure context (xSecureContext is NULL). */
+ " free_secure_context: \n"
+ " svc %0 \n" /* Secure context is freed in the supervisor call. */
+ " bx lr \n" /* Return. */
::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
);
}
diff --git a/Source/portable/GCC/ARM_CM23/non_secure/portasm.h b/Source/portable/GCC/ARM_CM23/non_secure/portasm.h
index 93606b1..f64ceb5 100644
--- a/Source/portable/GCC/ARM_CM23/non_secure/portasm.h
+++ b/Source/portable/GCC/ARM_CM23/non_secure/portasm.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM23/non_secure/portmacro.h b/Source/portable/GCC/ARM_CM23/non_secure/portmacro.h
index 6852153..d8dab92 100644
--- a/Source/portable/GCC/ARM_CM23/non_secure/portmacro.h
+++ b/Source/portable/GCC/ARM_CM23/non_secure/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -29,11 +29,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
+/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
-
-#include "portmacrocommon.h"
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -48,11 +48,16 @@
/**
* Architecture specifics.
*/
-#define portARCH_NAME "Cortex-M23"
-#define portDONT_DISCARD __attribute__( ( used ) )
+#define portARCH_NAME "Cortex-M23"
+#define portHAS_BASEPRI 0
+#define portDONT_DISCARD __attribute__( ( used ) )
/*-----------------------------------------------------------*/
-#if( configTOTAL_MPU_REGIONS == 16 )
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
+#if ( configTOTAL_MPU_REGIONS == 16 )
#error 16 MPU regions are not yet supported for this port.
#endif
/*-----------------------------------------------------------*/
@@ -60,12 +65,14 @@
/**
* @brief Critical section management.
*/
-#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
-#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
+#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
+#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
/*-----------------------------------------------------------*/
+/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h b/Source/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h
index e68692a..6f666da 100644
--- a/Source/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h
+++ b/Source/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -27,11 +27,13 @@
*/
#ifndef PORTMACROCOMMON_H
- #define PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -43,209 +45,329 @@
*------------------------------------------------------------------------------
*/
- #ifndef configENABLE_FPU
- #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
- #endif /* configENABLE_FPU */
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
- #ifndef configENABLE_MPU
- #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
- #endif /* configENABLE_MPU */
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
- #ifndef configENABLE_TRUSTZONE
- #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
- #endif /* configENABLE_TRUSTZONE */
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
- #define portCHAR char
- #define portFLOAT float
- #define portDOUBLE double
- #define portLONG long
- #define portSHORT short
- #define portSTACK_TYPE uint32_t
- #define portBASE_TYPE long
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
- typedef portSTACK_TYPE StackType_t;
- typedef long BaseType_t;
- typedef unsigned long UBaseType_t;
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
- typedef uint16_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffff
- #else
- typedef uint32_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
- #define portTICK_TYPE_IS_ATOMIC 1
- #endif
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
- #define portSTACK_GROWTH ( -1 )
- #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
- #define portBYTE_ALIGNMENT 8
- #define portNOP()
- #define portINLINE __inline
- #ifndef portFORCE_INLINE
- #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
- #endif
- #define portHAS_STACK_OVERFLOW_CHECKING 1
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
* @brief Extern declarations.
*/
- extern BaseType_t xPortIsInsideInterrupt( void );
+extern BaseType_t xPortIsInsideInterrupt( void );
- extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- #if ( configENABLE_TRUSTZONE == 1 )
- extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
- extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
- #endif /* configENABLE_TRUSTZONE */
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
- #if ( configENABLE_MPU == 1 )
- extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
- extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/**
* @brief MPU specific constants.
*/
- #if ( configENABLE_MPU == 1 )
- #define portUSING_MPU_WRAPPERS 1
- #define portPRIVILEGE_BIT ( 0x80000000UL )
- #else
- #define portPRIVILEGE_BIT ( 0x0UL )
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
/* MPU settings that can be overriden in FreeRTOSConfig.h. */
#ifndef configTOTAL_MPU_REGIONS
/* Define to 8 for backward compatibility. */
- #define configTOTAL_MPU_REGIONS ( 8UL )
+ #define configTOTAL_MPU_REGIONS ( 8UL )
#endif
/* MPU regions. */
- #define portPRIVILEGED_FLASH_REGION ( 0UL )
- #define portUNPRIVILEGED_FLASH_REGION ( 1UL )
- #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
- #define portPRIVILEGED_RAM_REGION ( 3UL )
- #define portSTACK_REGION ( 4UL )
- #define portFIRST_CONFIGURABLE_REGION ( 5UL )
- #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
- #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
- #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
- * 01 --> Device-nGnRE
- * 10 --> Device-nGRE
- * 11 --> Device-GRE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
- #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
- #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
- #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
- #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
- #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
- #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
/* Attributes used in MPU_RBAR registers. */
- #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
- #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
- #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
- #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
- #define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
- #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
- #define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
- #define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
typedef struct MPURegionSettings
{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
-/**
- * @brief MPU settings as stored in the TCB.
- */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
} xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
* @brief SVC numbers.
*/
- #define portSVC_ALLOCATE_SECURE_CONTEXT 0
- #define portSVC_FREE_SECURE_CONTEXT 1
- #define portSVC_START_SCHEDULER 2
- #define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
- #define portYIELD() vPortYield()
- #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
- #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
- #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 )
- #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/**
* @brief Critical section management.
*/
- #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
- #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
- #define portENTER_CRITICAL() vPortEnterCritical()
- #define portEXIT_CRITICAL() vPortExitCritical()
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/**
* @brief Tickless idle/low power functionality.
*/
- #ifndef portSUPPRESS_TICKS_AND_SLEEP
- extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
- #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
- #endif
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
/*-----------------------------------------------------------*/
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
- #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/*-----------------------------------------------------------*/
- #if ( configENABLE_TRUSTZONE == 1 )
+#if ( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
@@ -256,7 +378,7 @@
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
- #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
@@ -264,18 +386,18 @@
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
- #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
- #endif /* configENABLE_TRUSTZONE */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
- #if ( configENABLE_MPU == 1 )
+#if ( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
- #define portIS_PRIVILEGED() xIsPrivileged()
+ #define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
@@ -284,28 +406,44 @@
* then it raises the privilege. If this is called from any other place,
* the privilege is not raised.
*/
- #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting the bit 0 of the CONTROL
* register.
*/
- #define portRESET_PRIVILEGE() vResetPrivilege()
- #else
- #define portIS_PRIVILEGED()
- #define portRAISE_PRIVILEGE()
- #define portRESET_PRIVILEGE()
- #endif /* configENABLE_MPU */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
* @brief Barriers.
*/
- #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
/*-----------------------------------------------------------*/
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/GCC/ARM_CM23/secure/secure_context.c b/Source/portable/GCC/ARM_CM23/secure/secure_context.c
index 1996693..e37dd96 100644
--- a/Source/portable/GCC/ARM_CM23/secure/secure_context.c
+++ b/Source/portable/GCC/ARM_CM23/secure/secure_context.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM23/secure/secure_context.h b/Source/portable/GCC/ARM_CM23/secure/secure_context.h
index de33d15..2220ea6 100644
--- a/Source/portable/GCC/ARM_CM23/secure/secure_context.h
+++ b/Source/portable/GCC/ARM_CM23/secure/secure_context.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM23/secure/secure_context_port.c b/Source/portable/GCC/ARM_CM23/secure/secure_context_port.c
index 3331fc3..ce35340 100644
--- a/Source/portable/GCC/ARM_CM23/secure/secure_context_port.c
+++ b/Source/portable/GCC/ARM_CM23/secure/secure_context_port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM23/secure/secure_heap.c b/Source/portable/GCC/ARM_CM23/secure/secure_heap.c
index b3bf007..19f7c23 100644
--- a/Source/portable/GCC/ARM_CM23/secure/secure_heap.c
+++ b/Source/portable/GCC/ARM_CM23/secure/secure_heap.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM23/secure/secure_heap.h b/Source/portable/GCC/ARM_CM23/secure/secure_heap.h
index e469f2c..75c9cb0 100644
--- a/Source/portable/GCC/ARM_CM23/secure/secure_heap.h
+++ b/Source/portable/GCC/ARM_CM23/secure/secure_heap.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM23/secure/secure_init.c b/Source/portable/GCC/ARM_CM23/secure/secure_init.c
index f6570d8..f93bfce 100644
--- a/Source/portable/GCC/ARM_CM23/secure/secure_init.c
+++ b/Source/portable/GCC/ARM_CM23/secure/secure_init.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM23/secure/secure_init.h b/Source/portable/GCC/ARM_CM23/secure/secure_init.h
index e89af71..e6c9da0 100644
--- a/Source/portable/GCC/ARM_CM23/secure/secure_init.h
+++ b/Source/portable/GCC/ARM_CM23/secure/secure_init.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM23/secure/secure_port_macros.h b/Source/portable/GCC/ARM_CM23/secure/secure_port_macros.h
index 2fb7c59..d7ac583 100644
--- a/Source/portable/GCC/ARM_CM23/secure/secure_port_macros.h
+++ b/Source/portable/GCC/ARM_CM23/secure/secure_port_macros.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c b/Source/portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..b8164a0
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2176 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+#include "mpu_syscall_numbers.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskDelayUntil ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskAbortDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskPriorityGet ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_eTaskGetState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskGetInfo ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetIdleTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSuspend ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskResume ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ TickType_t MPU_xTaskGetTickCount( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetTickCount ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetNumberOfTasks ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTaskGetName ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n" /* privileged: tail-call the implementation */
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetSystemState ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n" /* privileged: tail-call the implementation */
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n" /* privileged: tail-call the implementation */
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark2 ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetCurrentTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetSchedulerState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n" /* privileged: tail-call the implementation */
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetTimeOutState ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskCheckForTimeOut ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: params packed in a struct; tail-calls the Impl when privileged, else SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotify ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: params packed in a struct; tail-calls the Impl when privileged, else SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyWait ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n" /* privileged: tail-call the implementation */
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyTake ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyStateClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n" /* privileged: tail-call the implementation */
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyValueClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGenericSendImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGenericSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n" /* privileged: tail-call the implementation */
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueMessagesWaiting ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n" /* privileged: tail-call the implementation */
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueReceiveImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueuePeekImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueuePeek ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSemaphoreTake ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGetMutexHolder ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueTakeMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGiveMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueSelectFromSetImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSelectFromSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueAddToSetImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueAddToSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vQueueAddToRegistryImpl \n" /* privileged: tail-call the implementation */
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueAddToRegistry ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n" /* privileged: tail-call the implementation */
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueUnregisterQueue ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcQueueGetNameImpl \n" /* privileged: tail-call the implementation */
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcQueueGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n" /* privileged: tail-call the implementation */
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTimerGetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTimerSetTimerIDImpl \n" /* privileged: tail-call the implementation */
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerIsTimerActive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper; unlike its siblings it also takes the privileged path from handler mode. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, ipsr \n" /* IPSR != 0 => executing in an exception handler */
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n" /* handler mode is always privileged */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged thread mode */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGenericCommandPrivImpl \n" /* privileged: tail-call the implementation */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGenericCommand ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcTimerGetNameImpl \n" /* privileged: tail-call the implementation */
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTimerGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTimerSetReloadModeImpl \n" /* privileged: tail-call the implementation */
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetReloadModeImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n" /* privileged: tail-call the implementation */
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked MPU wrapper: tail-calls the Impl when privileged, otherwise raises an SVC. */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* save caller's r0/r1 around the privilege probe */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV): 0 = privileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetPeriodImpl \n" /* privileged: tail-call the implementation */
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n" /* unprivileged: SVC into the kernel with this API's call number */
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetPeriod ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetExpiryTime ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupWaitBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupClearBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSetBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSync ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxEventGroupGetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vEventGroupSetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsFull ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsEmpty ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferBytesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSetTriggerLevel ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferNextMessageLengthBytes ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/Source/portable/GCC/ARM_CM23_NTZ/non_secure/port.c b/Source/portable/GCC/ARM_CM23_NTZ/non_secure/port.c
index 349aeff..9712ac3 100644
--- a/Source/portable/GCC/ARM_CM23_NTZ/non_secure/port.c
+++ b/Source/portable/GCC/ARM_CM23_NTZ/non_secure/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
-/* MPU wrappers includes. */
+/* MPU includes. */
#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@@ -95,6 +96,26 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -111,6 +132,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -135,6 +164,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -178,6 +209,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lies within [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -299,6 +354,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -352,8 +420,67 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
/**
* @brief Each task maintains its own interrupt status in the critical nesting
* variable.
@@ -369,6 +496,19 @@
PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
#endif /* configENABLE_TRUSTZONE */
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
#if ( configUSE_TICKLESS_IDLE == 1 )
/**
@@ -656,10 +796,29 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@@ -827,9 +986,8 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@@ -839,7 +997,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -854,7 +1012,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -925,18 +1083,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -944,131 +1102,546 @@
}
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Store the value of the PSPLIM register before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the privileged code (i.e. application is not
+ * raising SVC directly). This SVC is only raised from
+ * vRequestSystemCallExit which is in the privileged code section.
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+ * this means that we previously entered a system call and the
+ * application is not attempting to exit without entering a system
+ * call.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+ {
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Return to the caller of the System Call entry point (i.e. the
+ * caller of the MPU_<API>). */
+ pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ /* Ensure that LR has a valid value.*/
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* Restore the PSPLIM register to what it was at the time of
+ * system call entry. */
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
+
+ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+ }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+ }
+
+#else /* configENABLE_MPU */
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
+ {
+ /* Simulate the stack frame as it would be created by a context switch
+ * interrupt. */
+ #if ( portPRELOAD_REGISTERS == 0 )
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
+ *pxTopOfStack = portINITIAL_EXC_RETURN;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #else /* portPRELOAD_REGISTERS */
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+ pxTopOfStack--;
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #endif /* portPRELOAD_REGISTERS */
+
+ return pxTopOfStack;
+ }
+
#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
-{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- #if ( portPRELOAD_REGISTERS == 0 )
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #else /* portPRELOAD_REGISTERS */
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #endif /* portPRELOAD_REGISTERS */
-
- return pxTopOfStack;
-}
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
{
+ #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+ {
+ volatile uint32_t ulOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine the maximum priority from which ISR safe FreeRTOS API
+ * functions can be called. ISR safe functions are those that end in
+ * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+ * ensure interrupt entry is as fast and simple as possible.
+ *
+ * Save the interrupt priority value that is about to be clobbered. */
+ ulOriginalPriority = portNVIC_SHPR2_REG;
+
+ /* Determine the number of priority bits available. First write to all
+ * possible bits. */
+ portNVIC_SHPR2_REG = 0xFF000000;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+ /* Use the same mask on the maximum system call priority. */
+ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+ /* Calculate the maximum acceptable priority group value for the number
+ * of bits read back. */
+
+ while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+ {
+ ulImplementedPrioBits++;
+ ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+ }
+
+ if( ulImplementedPrioBits == 8 )
+ {
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
+ }
+ else
+ {
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+ }
+
+ /* Shift the priority group value back to its position within the AIRCR
+ * register. */
+ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+ ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+ /* Restore the clobbered interrupt priority register to its original
+ * value. */
+ portNVIC_SHPR2_REG = ulOriginalPriority;
+ }
+ #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
/* Make PendSV, CallSV and SysTick the same priority as the kernel. */
portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
@@ -1087,6 +1660,12 @@
/* Initialize the critical nesting count ready for the first task. */
ulCriticalNesting = 0;
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+ {
+ xSchedulerRunning = pdTRUE;
+ }
+ #endif
+
/* Start the first task. */
vStartFirstTask();
@@ -1122,7 +1701,6 @@
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;
@@ -1237,6 +1815,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
@@ -1259,3 +1885,159 @@
return xReturn;
}
/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ void vPortValidateInterruptPriority( void )
+ {
+ uint32_t ulCurrentInterrupt;
+ uint8_t ucCurrentPriority;
+
+ /* Obtain the number of the currently executing interrupt. */
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+ /* Is the interrupt number a user defined interrupt? */
+ if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+ {
+ /* Look up the interrupt's priority. */
+ ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+ /* The following assertion will fail if a service routine (ISR) for
+ * an interrupt that has been assigned a priority above
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+ * function. ISR safe FreeRTOS API functions must *only* be called
+ * from interrupts that have been assigned a priority at or below
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Numerically low interrupt priority numbers represent logically high
+ * interrupt priorities, therefore the priority of the interrupt must
+ * be set to a value equal to or numerically *higher* than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
+ * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+ * and therefore also guaranteed to be invalid.
+ *
+ * FreeRTOS maintains separate thread and ISR API functions to ensure
+ * interrupt entry is as fast and simple as possible.
+ *
+ * The following links provide detailed information:
+ * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+ * https://www.FreeRTOS.org/FAQHelp.html */
+ configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+ }
+
+ /* Priority grouping: The interrupt controller (NVIC) allows the bits
+ * that define each interrupt's priority to be split between bits that
+ * define the interrupt's pre-emption priority bits and bits that define
+ * the interrupt's sub-priority. For simplicity all bits must be defined
+ * to be pre-emption priority bits. The following assertion will fail if
+ * this is not the case (if some bits represent a sub-priority).
+ *
+ * If the application only uses CMSIS libraries for interrupt
+ * configuration then the correct setting can be achieved on all Cortex-M
+ * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+ * scheduler. Note however that some vendor specific peripheral libraries
+ * assume a non-zero priority group setting, in which cases using a value
+ * of zero will result in unpredictable behaviour. */
+ configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+ }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings;
+
+ if( xSchedulerRunning == pdFALSE )
+ {
+ /* Grant access to all the kernel objects before the scheduler
+ * is started. It is necessary because there is no task running
+ * yet and therefore, we cannot use the permissions of any
+ * task. */
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+
+ #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ ( void ) lInternalIndexOfKernelObject;
+
+ /* If Access Control List feature is not used, all the tasks have
+ * access to all the kernel objects. */
+ return pdTRUE;
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c b/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c
index b668d15..b90da71 100644
--- a/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c
+++ b/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -36,6 +36,9 @@
/* Portasm includes. */
#include "portasm.h"
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
+
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
@@ -44,107 +47,150 @@
#error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
#endif
-void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
-{
- __asm volatile
- (
- " .syntax unified \n"
- " \n"
- " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r2] \n"/* Read pxCurrentTCB. */
- " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r4, #1 \n"/* r4 = 1. */
- " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r2] \n"/* Program RNR = 4. */
- " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */
- " movs r4, #5 \n"/* r4 = 5. */
- " str r4, [r2] \n"/* Program RNR = 5. */
- " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */
- " movs r4, #6 \n"/* r4 = 6. */
- " str r4, [r2] \n"/* Program RNR = 6. */
- " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */
- " movs r4, #7 \n"/* r4 = 7. */
- " str r4, [r2] \n"/* Program RNR = 7. */
- " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r4, #1 \n"/* r4 = 1. */
- " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " msr control, r2 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " bx r2 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst2: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
- );
-}
+#if ( configENABLE_MPU == 1 )
+
+ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */
+ " movs r3, #5 \n" /* r3 = 5. */
+ " str r3, [r1] \n" /* Program RNR = 5. */
+ " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */
+ " movs r3, #6 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 6. */
+ " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */
+             "    movs r3, #7                               \n" /* r3 = 7. */
+ " str r3, [r1] \n" /* Program RNR = 7. */
+ " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " subs r1, #16 \n"
+ " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ " subs r1, #16 \n"
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " mov lr, r5 \n"
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " subs r1, #32 \n"
+ " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */
+             "    stmia r2!, {r4-r7}                        \n" /* Copy half of the hardware saved context on the task stack. */
+ " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */
+             "    stmia r2!, {r4-r7}                        \n" /* Copy the remaining half of the hardware saved context on the task stack. */
+ " subs r1, #48 \n"
+ " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " subs r1, #32 \n"
+ " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */
+ " subs r1, #16 \n"
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+ }
+
+#else /* configENABLE_MPU */
+
+ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ " \n"
+ " ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ " msr psplim, r1 \n" /* Set this task's PSPLIM value. */
+ " movs r1, #2 \n" /* r1 = 2. */
+ " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n" /* Discard everything up to r0. */
+ " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " bx r2 \n" /* Finally, branch to EXC_RETURN. */
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ );
+ }
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, control \n"/* r0 = CONTROL. */
- " movs r1, #1 \n"/* r1 = 1. */
- " tst r0, r1 \n"/* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
- " beq running_privileged \n"/* If the result of previous AND operation was 0, branch. */
- " movs r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
- " bx lr \n"/* Return. */
- " running_privileged: \n"
- " movs r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
- " bx lr \n"/* Return. */
- " \n"
- " .align 4 \n"
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
+ " beq running_privileged \n" /* If the result of previous AND operation was 0, branch. */
+ " movs r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ " bx lr \n" /* Return. */
+ " running_privileged: \n"
+ " movs r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ " bx lr \n" /* Return. */
+ " \n"
+ " .align 4 \n"
::: "r0", "r1", "memory"
);
}
@@ -154,13 +200,13 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, control \n"/* Read the CONTROL register. */
- " movs r1, #1 \n"/* r1 = 1. */
- " bics r0, r1 \n"/* Clear the bit 0. */
- " msr control, r0 \n"/* Write back the new CONTROL value. */
- " bx lr \n"/* Return to the caller. */
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* Read the CONTROL register. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear the bit 0. */
+ " msr control, r0 \n" /* Write back the new CONTROL value. */
+ " bx lr \n" /* Return to the caller. */
::: "r0", "r1", "memory"
);
}
@@ -170,13 +216,13 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, control \n"/* r0 = CONTROL. */
- " movs r1, #1 \n"/* r1 = 1. */
- " orrs r0, r1 \n"/* r0 = r0 | r1. */
- " msr control, r0 \n"/* CONTROL = r0. */
- " bx lr \n"/* Return to the caller. */
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* r0 = r0 | r1. */
+ " msr control, r0 \n" /* CONTROL = r0. */
+ " bx lr \n" /* Return to the caller. */
::: "r0", "r1", "memory"
);
}
@@ -186,20 +232,20 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
- " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
- " msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
- " cpsie i \n"/* Globally enable interrupts. */
- " dsb \n"
- " isb \n"
- " svc %0 \n"/* System call to start the first task. */
- " nop \n"
- " \n"
- " .align 4 \n"
- "xVTORConst: .word 0xe000ed08 \n"
+ " .syntax unified \n"
+ " \n"
+ " ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
+ " ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
+ " msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
+ " cpsie i \n" /* Globally enable interrupts. */
+ " dsb \n"
+ " isb \n"
+ " svc %0 \n" /* System call to start the first task. */
+ " nop \n"
+ " \n"
+ " .align 4 \n"
+ "xVTORConst: .word 0xe000ed08 \n"
::"i" ( portSVC_START_SCHEDULER ) : "memory"
);
}
@@ -209,11 +255,11 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, PRIMASK \n"
- " cpsid i \n"
- " bx lr \n"
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, PRIMASK \n"
+ " cpsid i \n"
+ " bx lr \n"
::: "memory"
);
}
@@ -223,159 +269,260 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " msr PRIMASK, r0 \n"
- " bx lr \n"
+ " .syntax unified \n"
+ " \n"
+ " msr PRIMASK, r0 \n"
+ " bx lr \n"
::: "memory"
);
}
/*-----------------------------------------------------------*/
-void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
-{
- __asm volatile
- (
- " .syntax unified \n"
- " \n"
- " mrs r0, psp \n"/* Read PSP in r0. */
- " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r2] \n"/* Read pxCurrentTCB. */
- #if ( configENABLE_MPU == 1 )
- " subs r0, r0, #44 \n"/* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- " str r0, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r2, control \n"/* r2 = CONTROL. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r0!, {r1-r7} \n"/* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */
- " mov r4, r8 \n"/* r4 = r8. */
- " mov r5, r9 \n"/* r5 = r9. */
- " mov r6, r10 \n"/* r6 = r10. */
- " mov r7, r11 \n"/* r7 = r11. */
- " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
- #else /* configENABLE_MPU */
- " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */
- " str r0, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r2, psplim \n"/* r2 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
- " mov r4, r8 \n"/* r4 = r8. */
- " mov r5, r9 \n"/* r5 = r9. */
- " mov r6, r10 \n"/* r6 = r10. */
- " mov r7, r11 \n"/* r7 = r11. */
- " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
- #endif /* configENABLE_MPU */
- " \n"
- " cpsid i \n"
- " bl vTaskSwitchContext \n"
- " cpsie i \n"
- " \n"
- " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r2] \n"/* Read pxCurrentTCB. */
- " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r4, #1 \n"/* r4 = 1. */
- " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r2] \n"/* Program RNR = 4. */
- " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */
- " movs r4, #5 \n"/* r4 = 5. */
- " str r4, [r2] \n"/* Program RNR = 5. */
- " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */
- " movs r4, #6 \n"/* r4 = 6. */
- " str r4, [r2] \n"/* Program RNR = 6. */
- " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */
- " movs r4, #7 \n"/* r4 = 7. */
- " str r4, [r2] \n"/* Program RNR = 7. */
- " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */
- " \n"
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r4, #1 \n"/* r4 = 1. */
- " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " adds r0, r0, #28 \n"/* Move to the high registers. */
- " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
- " mov r8, r4 \n"/* r8 = r4. */
- " mov r9, r5 \n"/* r9 = r5. */
- " mov r10, r6 \n"/* r10 = r6. */
- " mov r11, r7 \n"/* r11 = r7. */
- " msr psp, r0 \n"/* Remember the new top of stack for the task. */
- " subs r0, r0, #44 \n"/* Move to the starting of the saved context. */
- " ldmia r0!, {r1-r7} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r2 \n"/* Restore the CONTROL register value for the task. */
- " bx r3 \n"
- #else /* configENABLE_MPU */
- " adds r0, r0, #24 \n"/* Move to the high registers. */
- " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
- " mov r8, r4 \n"/* r8 = r4. */
- " mov r9, r5 \n"/* r9 = r5. */
- " mov r10, r6 \n"/* r10 = r6. */
- " mov r11, r7 \n"/* r11 = r7. */
- " msr psp, r0 \n"/* Remember the new top of stack for the task. */
- " subs r0, r0, #40 \n"/* Move to the starting of the saved context. */
- " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
- " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
- " bx r3 \n"
- #endif /* configENABLE_MPU */
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
- );
-}
+#if ( configENABLE_MPU == 1 )
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " \n"
+ " save_general_regs: \n"
+ " stmia r1!, {r4-r7} \n" /* Store r4-r7. */
+ " mov r4, r8 \n" /* r4 = r8. */
+ " mov r5, r9 \n" /* r5 = r9. */
+ " mov r6, r10 \n" /* r6 = r10. */
+ " mov r7, r11 \n" /* r7 = r11. */
+ " stmia r1!, {r4-r7} \n" /* Store r8-r11. */
+ " ldmia r2!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */
+ " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */
+            "    ldmia r2!, {r4-r7}                  \n" /* Copy the remaining half of the hardware saved context into r4-r7. */
+ " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " mrs r3, psplim \n" /* r3 = PSPLIM. */
+ " mrs r4, control \n" /* r4 = CONTROL. */
+ " mov r5, lr \n" /* r5 = LR. */
+ " stmia r1!, {r2-r5} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " cpsid i \n"
+ " bl vTaskSwitchContext \n"
+ " cpsie i \n"
+ " \n"
+ " program_mpu: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */
+ " movs r3, #5 \n" /* r3 = 5. */
+ " str r3, [r1] \n" /* Program RNR = 5. */
+ " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */
+ " movs r3, #6 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 6. */
+ " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */
+            "    movs r3, #7                          \n" /* r3 = 7. */
+ " str r3, [r1] \n" /* Program RNR = 7. */
+ " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " subs r1, #16 \n"
+ " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ " subs r1, #16 \n"
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " mov lr, r5 \n"
+ " \n"
+ " restore_general_regs: \n"
+ " subs r1, #32 \n"
+ " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */
+            "    stmia r2!, {r4-r7}                  \n" /* Copy half of the hardware saved context onto the task stack. */
+            "    ldmia r1!, {r4-r7}                  \n" /* r4-r7 contain the remaining half of the hardware saved context. */
+            "    stmia r2!, {r4-r7}                  \n" /* Copy the remaining half of the hardware saved context onto the task stack. */
+ " subs r1, #48 \n"
+ " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " subs r1, #32 \n"
+ " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */
+ " subs r1, #16 \n"
+ " \n"
+ " restore_context_done: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ );
+ }
+
+#else /* configENABLE_MPU */
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, psp \n" /* Read PSP in r0. */
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r2] \n" /* Read pxCurrentTCB. */
+ " subs r0, r0, #40 \n" /* Make space for PSPLIM, LR and the remaining registers on the stack. */
+ " str r0, [r1] \n" /* Save the new top of stack in TCB. */
+ " mrs r2, psplim \n" /* r2 = PSPLIM. */
+ " mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
+ " stmia r0!, {r2-r7} \n" /* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
+ " mov r4, r8 \n" /* r4 = r8. */
+ " mov r5, r9 \n" /* r5 = r9. */
+ " mov r6, r10 \n" /* r6 = r10. */
+ " mov r7, r11 \n" /* r7 = r11. */
+ " stmia r0!, {r4-r7} \n" /* Store the high registers that are not saved automatically. */
+ " \n"
+ " cpsid i \n"
+ " bl vTaskSwitchContext \n"
+ " cpsie i \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
+ " \n"
+ " adds r0, r0, #24 \n" /* Move to the high registers. */
+ " ldmia r0!, {r4-r7} \n" /* Restore the high registers that are not automatically restored. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " msr psp, r0 \n" /* Remember the new top of stack for the task. */
+ " subs r0, r0, #40 \n" /* Move to the starting of the saved context. */
+ " ldmia r0!, {r2-r7} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
+ " msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
+ " bx r3 \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ );
+ }
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
-void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
-{
- __asm volatile
- (
- " .syntax unified \n"
- " \n"
- " movs r0, #4 \n"
- " mov r1, lr \n"
- " tst r0, r1 \n"
- " beq stacking_used_msp \n"
- " mrs r0, psp \n"
- " ldr r2, svchandler_address_const \n"
- " bx r2 \n"
- " stacking_used_msp: \n"
- " mrs r0, msp \n"
- " ldr r2, svchandler_address_const \n"
- " bx r2 \n"
- " \n"
- " .align 4 \n"
- "svchandler_address_const: .word vPortSVCHandler_C \n"
- );
-}
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "movs r0, #4 \n"
+ "mov r1, lr \n"
+ "tst r0, r1 \n"
+ "beq stack_on_msp \n"
+ "stack_on_psp: \n"
+ " mrs r0, psp \n"
+ " b route_svc \n"
+ "stack_on_msp: \n"
+ " mrs r0, msp \n"
+ " b route_svc \n"
+ " \n"
+ "route_svc: \n"
+ " ldr r3, [r0, #24] \n"
+ " subs r3, #2 \n"
+ " ldrb r2, [r3, #0] \n"
+ " cmp r2, %0 \n"
+ " blt system_call_enter \n"
+ " cmp r2, %1 \n"
+ " beq system_call_exit \n"
+ " b vPortSVCHandler_C \n"
+ " \n"
+ "system_call_enter: \n"
+ " b vSystemCallEnter \n"
+ "system_call_exit: \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ : "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "r3", "memory"
+ );
+ }
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " movs r0, #4 \n"
+ " mov r1, lr \n"
+ " tst r0, r1 \n"
+ " beq stacking_used_msp \n"
+ " mrs r0, psp \n"
+ " ldr r2, svchandler_address_const \n"
+ " bx r2 \n"
+ " stacking_used_msp: \n"
+ " mrs r0, msp \n"
+ " ldr r2, svchandler_address_const \n"
+ " bx r2 \n"
+ " \n"
+ " .align 4 \n"
+ "svchandler_address_const: .word vPortSVCHandler_C \n"
+ );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.h b/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.h
index 93606b1..f64ceb5 100644
--- a/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.h
+++ b/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h b/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h
index 6852153..d8dab92 100644
--- a/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h
+++ b/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -29,11 +29,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
+/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
-
-#include "portmacrocommon.h"
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -48,11 +48,16 @@
/**
* Architecture specifics.
*/
-#define portARCH_NAME "Cortex-M23"
-#define portDONT_DISCARD __attribute__( ( used ) )
+#define portARCH_NAME "Cortex-M23"
+#define portHAS_BASEPRI 0
+#define portDONT_DISCARD __attribute__( ( used ) )
/*-----------------------------------------------------------*/
-#if( configTOTAL_MPU_REGIONS == 16 )
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
+#if ( configTOTAL_MPU_REGIONS == 16 )
#error 16 MPU regions are not yet supported for this port.
#endif
/*-----------------------------------------------------------*/
@@ -60,12 +65,14 @@
/**
* @brief Critical section management.
*/
-#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
-#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
+#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
+#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
/*-----------------------------------------------------------*/
+/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h
index e68692a..6f666da 100644
--- a/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h
+++ b/Source/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -27,11 +27,13 @@
*/
#ifndef PORTMACROCOMMON_H
- #define PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -43,209 +45,329 @@
*------------------------------------------------------------------------------
*/
- #ifndef configENABLE_FPU
- #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
- #endif /* configENABLE_FPU */
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
- #ifndef configENABLE_MPU
- #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
- #endif /* configENABLE_MPU */
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
- #ifndef configENABLE_TRUSTZONE
- #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
- #endif /* configENABLE_TRUSTZONE */
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
- #define portCHAR char
- #define portFLOAT float
- #define portDOUBLE double
- #define portLONG long
- #define portSHORT short
- #define portSTACK_TYPE uint32_t
- #define portBASE_TYPE long
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
- typedef portSTACK_TYPE StackType_t;
- typedef long BaseType_t;
- typedef unsigned long UBaseType_t;
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
- typedef uint16_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffff
- #else
- typedef uint32_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
- #define portTICK_TYPE_IS_ATOMIC 1
- #endif
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
- #define portSTACK_GROWTH ( -1 )
- #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
- #define portBYTE_ALIGNMENT 8
- #define portNOP()
- #define portINLINE __inline
- #ifndef portFORCE_INLINE
- #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
- #endif
- #define portHAS_STACK_OVERFLOW_CHECKING 1
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
* @brief Extern declarations.
*/
- extern BaseType_t xPortIsInsideInterrupt( void );
+extern BaseType_t xPortIsInsideInterrupt( void );
- extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- #if ( configENABLE_TRUSTZONE == 1 )
- extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
- extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
- #endif /* configENABLE_TRUSTZONE */
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
- #if ( configENABLE_MPU == 1 )
- extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
- extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/**
* @brief MPU specific constants.
*/
- #if ( configENABLE_MPU == 1 )
- #define portUSING_MPU_WRAPPERS 1
- #define portPRIVILEGE_BIT ( 0x80000000UL )
- #else
- #define portPRIVILEGE_BIT ( 0x0UL )
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
/* MPU settings that can be overriden in FreeRTOSConfig.h. */
#ifndef configTOTAL_MPU_REGIONS
/* Define to 8 for backward compatibility. */
- #define configTOTAL_MPU_REGIONS ( 8UL )
+ #define configTOTAL_MPU_REGIONS ( 8UL )
#endif
/* MPU regions. */
- #define portPRIVILEGED_FLASH_REGION ( 0UL )
- #define portUNPRIVILEGED_FLASH_REGION ( 1UL )
- #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
- #define portPRIVILEGED_RAM_REGION ( 3UL )
- #define portSTACK_REGION ( 4UL )
- #define portFIRST_CONFIGURABLE_REGION ( 5UL )
- #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
- #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
- #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
- * 01 --> Device-nGnRE
- * 10 --> Device-nGRE
- * 11 --> Device-GRE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
- #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
- #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
- #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
- #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
- #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
- #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
/* Attributes used in MPU_RBAR registers. */
- #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
- #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
- #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
- #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
- #define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
- #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
- #define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
- #define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
typedef struct MPURegionSettings
{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
-/**
- * @brief MPU settings as stored in the TCB.
- */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
} xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
* @brief SVC numbers.
*/
- #define portSVC_ALLOCATE_SECURE_CONTEXT 0
- #define portSVC_FREE_SECURE_CONTEXT 1
- #define portSVC_START_SCHEDULER 2
- #define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
- #define portYIELD() vPortYield()
- #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
- #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
- #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 )
- #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/**
* @brief Critical section management.
*/
- #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
- #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
- #define portENTER_CRITICAL() vPortEnterCritical()
- #define portEXIT_CRITICAL() vPortExitCritical()
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/**
* @brief Tickless idle/low power functionality.
*/
- #ifndef portSUPPRESS_TICKS_AND_SLEEP
- extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
- #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
- #endif
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
/*-----------------------------------------------------------*/
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
- #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/*-----------------------------------------------------------*/
- #if ( configENABLE_TRUSTZONE == 1 )
+#if ( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
@@ -256,7 +378,7 @@
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
- #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
@@ -264,18 +386,18 @@
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
- #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
- #endif /* configENABLE_TRUSTZONE */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
- #if ( configENABLE_MPU == 1 )
+#if ( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
- #define portIS_PRIVILEGED() xIsPrivileged()
+ #define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
@@ -284,28 +406,44 @@
* then it raises the privilege. If this is called from any other place,
* the privilege is not raised.
*/
- #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting the bit 0 of the CONTROL
* register.
*/
- #define portRESET_PRIVILEGE() vResetPrivilege()
- #else
- #define portIS_PRIVILEGED()
- #define portRAISE_PRIVILEGE()
- #define portRESET_PRIVILEGE()
- #endif /* configENABLE_MPU */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
* @brief Barriers.
*/
- #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
/*-----------------------------------------------------------*/
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/GCC/ARM_CM3/port.c b/Source/portable/GCC/ARM_CM3/port.c
index a5f4fd5..cae2c53 100644
--- a/Source/portable/GCC/ARM_CM3/port.c
+++ b/Source/portable/GCC/ARM_CM3/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -34,13 +34,6 @@
#include "FreeRTOS.h"
#include "task.h"
-/* For backward compatibility, ensure configKERNEL_INTERRUPT_PRIORITY is
- * defined. The value should also ensure backward compatibility.
- * FreeRTOS.org versions prior to V4.4.0 did not include this definition. */
-#ifndef configKERNEL_INTERRUPT_PRIORITY
- #define configKERNEL_INTERRUPT_PRIORITY 255
-#endif
-
/* Constants required to manipulate the core. Registers first... */
#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
@@ -55,8 +48,9 @@
#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
/* Constants required to check the validity of an interrupt priority. */
#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
@@ -225,19 +219,19 @@
void vPortSVCHandler( void )
{
__asm volatile (
- " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */
- " ldr r1, [r3] \n"/* Use pxCurrentTCBConst to get the pxCurrentTCB address. */
- " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */
- " ldmia r0!, {r4-r11} \n"/* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */
- " msr psp, r0 \n"/* Restore the task stack pointer. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " orr r14, #0xd \n"
- " bx r14 \n"
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */
+ " ldr r1, [r3] \n"/* Use pxCurrentTCBConst to get the pxCurrentTCB address. */
+ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */
+ " ldmia r0!, {r4-r11} \n"/* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */
+ " msr psp, r0 \n"/* Restore the task stack pointer. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
+ " orr r14, #0xd \n"
+ " bx r14 \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
/*-----------------------------------------------------------*/
@@ -245,17 +239,17 @@
static void prvPortStartFirstTask( void )
{
__asm volatile (
- " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"
- " ldr r0, [r0] \n"
- " msr msp, r0 \n"/* Set the msp back to the start of the stack. */
- " cpsie i \n"/* Globally enable interrupts. */
- " cpsie f \n"
- " dsb \n"
- " isb \n"
- " svc 0 \n"/* System call to start first task. */
- " nop \n"
- " .ltorg \n"
+ " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n"
+ " ldr r0, [r0] \n"
+ " msr msp, r0 \n"/* Set the msp back to the start of the stack. */
+ " cpsie i \n"/* Globally enable interrupts. */
+ " cpsie f \n"
+ " dsb \n"
+ " isb \n"
+ " svc 0 \n"/* System call to start first task. */
+ " nop \n"
+ " .ltorg \n"
);
}
/*-----------------------------------------------------------*/
@@ -265,13 +259,10 @@
*/
BaseType_t xPortStartScheduler( void )
{
- /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0.
- * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
- configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY );
-
#if ( configASSERT_DEFINED == 1 )
{
- volatile uint32_t ulOriginalPriority;
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
volatile uint8_t ucMaxPriorityValue;
@@ -281,7 +272,7 @@
* ensure interrupt entry is as fast and simple as possible.
*
* Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
/* Determine the number of priority bits available. First write to all
* possible bits. */
@@ -293,33 +284,53 @@
/* Use the same mask on the maximum system call priority. */
ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
/* Calculate the maximum acceptable priority group value for the number
* of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- ulMaxPRIGROUPValue--;
+ ulImplementedPrioBits++;
ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
- #ifdef __NVIC_PRIO_BITS
+ if( ulImplementedPrioBits == 8 )
{
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
}
- #endif
-
- #ifdef configPRIO_BITS
+ else
{
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
}
- #endif
/* Shift the priority group value back to its position within the AIRCR
* register. */
@@ -328,7 +339,7 @@
/* Restore the clobbered interrupt priority register to its original
* value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
}
#endif /* configASSERT_DEFINED */
@@ -403,32 +414,32 @@
__asm volatile
(
- " mrs r0, psp \n"
- " isb \n"
- " \n"
- " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */
- " ldr r2, [r3] \n"
- " \n"
- " stmdb r0!, {r4-r11} \n"/* Save the remaining registers. */
- " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */
- " \n"
- " stmdb sp!, {r3, r14} \n"
- " mov r0, %0 \n"
- " msr basepri, r0 \n"
- " bl vTaskSwitchContext \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " ldmia sp!, {r3, r14} \n"
- " \n"/* Restore the context, including the critical nesting count. */
- " ldr r1, [r3] \n"
- " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */
- " ldmia r0!, {r4-r11} \n"/* Pop the registers. */
- " msr psp, r0 \n"
- " isb \n"
- " bx r14 \n"
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " mrs r0, psp \n"
+ " isb \n"
+ " \n"
+ " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */
+ " ldr r2, [r3] \n"
+ " \n"
+ " stmdb r0!, {r4-r11} \n"/* Save the remaining registers. */
+ " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */
+ " \n"
+ " stmdb sp!, {r3, r14} \n"
+ " mov r0, %0 \n"
+ " msr basepri, r0 \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
+ " ldmia sp!, {r3, r14} \n"
+ " \n"/* Restore the context, including the critical nesting count. */
+ " ldr r1, [r3] \n"
+ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */
+ " ldmia r0!, {r4-r11} \n"/* Pop the registers. */
+ " msr psp, r0 \n"
+ " isb \n"
+ " bx r14 \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
@@ -728,10 +739,10 @@
* be set to a value equal to or numerically *higher* than
* configMAX_SYSCALL_INTERRUPT_PRIORITY.
*
- * Interrupts that use the FreeRTOS API must not be left at their
- * default priority of zero as that is the highest possible priority,
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
* which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
- * and therefore also guaranteed to be invalid.
+ * and therefore also guaranteed to be invalid.
*
* FreeRTOS maintains separate thread and ISR API functions to ensure
* interrupt entry is as fast and simple as possible.
@@ -758,4 +769,4 @@
configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
}
-#endif /* configASSERT_DEFINED */
+#endif /* configASSERT_DEFINED */
\ No newline at end of file
diff --git a/Source/portable/GCC/ARM_CM3/portmacro.h b/Source/portable/GCC/ARM_CM3/portmacro.h
index d9c1c1b..bdedf5a 100644
--- a/Source/portable/GCC/ARM_CM3/portmacro.h
+++ b/Source/portable/GCC/ARM_CM3/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -30,9 +30,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*-----------------------------------------------------------
* Port specific definitions.
@@ -57,16 +59,18 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
+ #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
- #else
+ #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+ #else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
@@ -199,10 +203,10 @@
__asm volatile
(
- " mov %0, %1 \n"\
- " msr basepri, %0 \n"\
- " isb \n"\
- " dsb \n"\
+ " mov %0, %1 \n"\
+ " msr basepri, %0 \n"\
+ " isb \n"\
+ " dsb \n"\
: "=r" ( ulNewBASEPRI ) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@@ -215,11 +219,11 @@
__asm volatile
(
- " mrs %0, basepri \n"\
- " mov %1, %2 \n"\
- " msr basepri, %1 \n"\
- " isb \n"\
- " dsb \n"\
+ " mrs %0, basepri \n"\
+ " mov %1, %2 \n"\
+ " msr basepri, %1 \n"\
+ " isb \n"\
+ " dsb \n"\
: "=r" ( ulOriginalBASEPRI ), "=r" ( ulNewBASEPRI ) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
@@ -233,15 +237,17 @@
{
__asm volatile
(
- " msr basepri, %0 "::"r" ( ulNewMaskValue ) : "memory"
+ " msr basepri, %0 "::"r" ( ulNewMaskValue ) : "memory"
);
}
/*-----------------------------------------------------------*/
#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c b/Source/portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..d247c92
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2106 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+#include "mpu_syscall_numbers.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskDelayUntil ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskAbortDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskPriorityGet ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_eTaskGetState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskGetInfo ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetIdleTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSuspend ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskResume ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ TickType_t MPU_xTaskGetTickCount( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetTickCount ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetNumberOfTasks ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTaskGetName ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimeCounter ) : "memory"
+ );
+ }
+
+    #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimePercent ) : "memory"
+ );
+ }
+
+    #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetSystemState ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark2 ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetCurrentTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetSchedulerState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetTimeOutState ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskCheckForTimeOut ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotify ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyWait ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyTake ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyStateClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyValueClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: if CONTROL bit0 (nPRIV) is clear the caller is privileged and we tail-branch to the Impl; otherwise raise SVC with the system-call number. r0 is preserved via push/pop so the first argument reaches the target intact. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGenericSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueMessagesWaiting ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueuePeek ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSemaphoreTake ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: CONTROL bit0 (nPRIV) clear = privileged caller, tail-branch to the Impl; set = unprivileged, raise SVC with the system-call number. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGetMutexHolder ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueTakeMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGiveMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSelectFromSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueAddToSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: CONTROL bit0 (nPRIV) clear = privileged caller, tail-branch to the Impl; set = unprivileged, raise SVC with the system-call number. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueAddToRegistry ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueUnregisterQueue ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcQueueGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: CONTROL bit0 (nPRIV) clear = privileged caller, tail-branch to the Impl; set = unprivileged, raise SVC with the system-call number. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTimerGetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerIsTimerActive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: unlike the other wrappers this also checks IPSR, so interrupt context (IPSR != 0) is routed to the privileged Impl rather than attempting an SVC from handler mode */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGenericCommand ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTimerGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetPeriod ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetExpiryTime ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: CONTROL bit0 (nPRIV) clear = privileged caller, tail-branch to the Impl; set = unprivileged, raise SVC. Parameters are packed in pxParams so only r0 matters. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupWaitBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupClearBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSetBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSync ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxEventGroupGetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vEventGroupSetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: CONTROL bit0 (nPRIV) clear = privileged caller, tail-branch to the Impl; set = unprivileged, raise SVC with the system-call number. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsFull ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsEmpty ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferBytesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSetTriggerLevel ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* naked: privileged -> branch to Impl, unprivileged -> SVC */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferNextMessageLengthBytes ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/Source/portable/GCC/ARM_CM33/non_secure/port.c b/Source/portable/GCC/ARM_CM33/non_secure/port.c
index 349aeff..9712ac3 100644
--- a/Source/portable/GCC/ARM_CM33/non_secure/port.c
+++ b/Source/portable/GCC/ARM_CM33/non_secure/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
-/* MPU wrappers includes. */
+/* MPU includes. */
#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@@ -95,6 +96,26 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -111,6 +132,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -135,6 +164,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -178,6 +209,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -299,6 +354,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -352,8 +420,67 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
/**
* @brief Each task maintains its own interrupt status in the critical nesting
* variable.
@@ -369,6 +496,19 @@
PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
#endif /* configENABLE_TRUSTZONE */
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
#if ( configUSE_TICKLESS_IDLE == 1 )
/**
@@ -656,10 +796,29 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@@ -827,9 +986,8 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@@ -839,7 +997,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -854,7 +1012,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -925,18 +1083,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -944,131 +1102,546 @@
}
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Store the value of the PSPLIM register before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the privileged code (i.e. application is not
+ * raising SVC directly). This SVC is only raised from
+ * vRequestSystemCallExit which is in the privileged code section.
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+ * this means that we previously entered a system call and the
+ * application is not attempting to exit without entering a system
+ * call.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+ {
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Return to the caller of the System Call entry point (i.e. the
+ * caller of the MPU_<API>). */
+ pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ /* Ensure that LR has a valid value.*/
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* Restore the PSPLIM register to what it was at the time of
+ * system call entry. */
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
+
+ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+ }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+ }
+
+#else /* configENABLE_MPU */
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
+ {
+ /* Simulate the stack frame as it would be created by a context switch
+ * interrupt. */
+ #if ( portPRELOAD_REGISTERS == 0 )
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
+ *pxTopOfStack = portINITIAL_EXC_RETURN;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #else /* portPRELOAD_REGISTERS */
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+ pxTopOfStack--;
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #endif /* portPRELOAD_REGISTERS */
+
+ return pxTopOfStack;
+ }
+
#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
-{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- #if ( portPRELOAD_REGISTERS == 0 )
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #else /* portPRELOAD_REGISTERS */
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #endif /* portPRELOAD_REGISTERS */
-
- return pxTopOfStack;
-}
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
{
+ #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+ {
+ volatile uint32_t ulOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine the maximum priority from which ISR safe FreeRTOS API
+ * functions can be called. ISR safe functions are those that end in
+ * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+ * ensure interrupt entry is as fast and simple as possible.
+ *
+ * Save the interrupt priority value that is about to be clobbered. */
+ ulOriginalPriority = portNVIC_SHPR2_REG;
+
+ /* Determine the number of priority bits available. First write to all
+ * possible bits. */
+ portNVIC_SHPR2_REG = 0xFF000000;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+ /* Use the same mask on the maximum system call priority. */
+ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+ /* Calculate the maximum acceptable priority group value for the number
+ * of bits read back. */
+
+ while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+ {
+ ulImplementedPrioBits++;
+ ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+ }
+
+ if( ulImplementedPrioBits == 8 )
+ {
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
+ }
+ else
+ {
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+ }
+
+ /* Shift the priority group value back to its position within the AIRCR
+ * register. */
+ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+ ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+ /* Restore the clobbered interrupt priority register to its original
+ * value. */
+ portNVIC_SHPR2_REG = ulOriginalPriority;
+ }
+ #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
/* Make PendSV, CallSV and SysTick the same priority as the kernel. */
portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
@@ -1087,6 +1660,12 @@
/* Initialize the critical nesting count ready for the first task. */
ulCriticalNesting = 0;
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+ {
+ xSchedulerRunning = pdTRUE;
+ }
+ #endif
+
/* Start the first task. */
vStartFirstTask();
@@ -1122,7 +1701,6 @@
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;
@@ -1237,6 +1815,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
@@ -1259,3 +1885,159 @@
return xReturn;
}
/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ void vPortValidateInterruptPriority( void )
+ {
+ uint32_t ulCurrentInterrupt;
+ uint8_t ucCurrentPriority;
+
+ /* Obtain the number of the currently executing interrupt. */
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+ /* Is the interrupt number a user defined interrupt? */
+ if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+ {
+ /* Look up the interrupt's priority. */
+ ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+ /* The following assertion will fail if a service routine (ISR) for
+ * an interrupt that has been assigned a priority above
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+ * function. ISR safe FreeRTOS API functions must *only* be called
+ * from interrupts that have been assigned a priority at or below
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Numerically low interrupt priority numbers represent logically high
+ * interrupt priorities, therefore the priority of the interrupt must
+ * be set to a value equal to or numerically *higher* than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
+ * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+ * and therefore also guaranteed to be invalid.
+ *
+ * FreeRTOS maintains separate thread and ISR API functions to ensure
+ * interrupt entry is as fast and simple as possible.
+ *
+ * The following links provide detailed information:
+ * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+ * https://www.FreeRTOS.org/FAQHelp.html */
+ configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+ }
+
+ /* Priority grouping: The interrupt controller (NVIC) allows the bits
+ * that define each interrupt's priority to be split between bits that
+ * define the interrupt's pre-emption priority bits and bits that define
+ * the interrupt's sub-priority. For simplicity all bits must be defined
+ * to be pre-emption priority bits. The following assertion will fail if
+ * this is not the case (if some bits represent a sub-priority).
+ *
+ * If the application only uses CMSIS libraries for interrupt
+ * configuration then the correct setting can be achieved on all Cortex-M
+ * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+ * scheduler. Note however that some vendor specific peripheral libraries
+ * assume a non-zero priority group setting, in which cases using a value
+ * of zero will result in unpredictable behaviour. */
+ configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+ }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings;
+
+ if( xSchedulerRunning == pdFALSE )
+ {
+ /* Grant access to all the kernel objects before the scheduler
+ * is started. It is necessary because there is no task running
+ * yet and therefore, we cannot use the permissions of any
+ * task. */
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+
+ #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ ( void ) lInternalIndexOfKernelObject;
+
+ /* If Access Control List feature is not used, all the tasks have
+ * access to all the kernel objects. */
+ return pdTRUE;
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM33/non_secure/portasm.c b/Source/portable/GCC/ARM_CM33/non_secure/portasm.c
index 3424b42..7431c98 100644
--- a/Source/portable/GCC/ARM_CM33/non_secure/portasm.c
+++ b/Source/portable/GCC/ARM_CM33/non_secure/portasm.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -36,115 +36,143 @@
/* Portasm includes. */
#include "portasm.h"
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
+
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
-void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
-{
- __asm volatile
- (
- " .syntax unified \n"
- " \n"
- " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r3, [r2] \n"/* Read pxCurrentTCB. */
- " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r2] \n"/* Program RNR = 4. */
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
+#if ( configENABLE_MPU == 1 )
+
+ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " \n"
#if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #8 \n"/* r4 = 8. */
- " str r4, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #12 \n"/* r4 = 12. */
- " str r4, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
#endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- " ldr r5, xSecureContextConst2 \n"
- " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " msr control, r3 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r4 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
- " ldr r4, xSecureContextConst2 \n"
- " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst2: .word pxCurrentTCB \n"
- "xSecureContextConst2: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
- );
-}
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xSecureContextConst2: .word xSecureContext \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+ }
+
+#else /* configENABLE_MPU */
+
+ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r3, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ " \n"
+ " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ " ldr r4, xSecureContextConst2 \n"
+ " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */
+ " msr psplim, r2 \n" /* Set this task's PSPLIM value. */
+ " movs r1, #2 \n" /* r1 = 2. */
+ " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n" /* Discard everything up to r0. */
+ " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx r3 \n" /* Finally, branch to EXC_RETURN. */
+ " .align 4 \n"
+ "pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ "xSecureContextConst2: .word xSecureContext \n"
+ );
+ }
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, control \n"/* r0 = CONTROL. */
- " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
- " ite ne \n"
- " movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
- " moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
- " bx lr \n"/* Return. */
- " \n"
- " .align 4 \n"
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+ " ite ne \n"
+ " movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ " moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ " bx lr \n" /* Return. */
+ " \n"
+ " .align 4 \n"
::: "r0", "memory"
);
}
@@ -154,12 +182,12 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, control \n"/* Read the CONTROL register. */
- " bic r0, #1 \n"/* Clear the bit 0. */
- " msr control, r0 \n"/* Write back the new CONTROL value. */
- " bx lr \n"/* Return to the caller. */
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* Read the CONTROL register. */
+ " bic r0, #1 \n" /* Clear the bit 0. */
+ " msr control, r0 \n" /* Write back the new CONTROL value. */
+ " bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@@ -169,12 +197,12 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, control \n"/* r0 = CONTROL. */
- " orr r0, #1 \n"/* r0 = r0 | 1. */
- " msr control, r0 \n"/* CONTROL = r0. */
- " bx lr \n"/* Return to the caller. */
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " orr r0, #1 \n" /* r0 = r0 | 1. */
+ " msr control, r0 \n" /* CONTROL = r0. */
+ " bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@@ -184,21 +212,21 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
- " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
- " msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
- " cpsie i \n"/* Globally enable interrupts. */
- " cpsie f \n"
- " dsb \n"
- " isb \n"
- " svc %0 \n"/* System call to start the first task. */
- " nop \n"
- " \n"
- " .align 4 \n"
- "xVTORConst: .word 0xe000ed08 \n"
+ " .syntax unified \n"
+ " \n"
+ " ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
+ " ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
+ " msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
+ " cpsie i \n" /* Globally enable interrupts. */
+ " cpsie f \n"
+ " dsb \n"
+ " isb \n"
+ " svc %0 \n" /* System call to start the first task. */
+ " nop \n"
+ " \n"
+ " .align 4 \n"
+ "xVTORConst: .word 0xe000ed08 \n"
::"i" ( portSVC_START_SCHEDULER ) : "memory"
);
}
@@ -208,14 +236,14 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */
- " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
- " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
- " dsb \n"
- " isb \n"
- " bx lr \n"/* Return. */
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
+ " mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " msr basepri, r1 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bx lr \n" /* Return. */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@@ -225,228 +253,338 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " msr basepri, r0 \n"/* basepri = ulMask. */
- " dsb \n"
- " isb \n"
- " bx lr \n"/* Return. */
+ " .syntax unified \n"
+ " \n"
+ " msr basepri, r0 \n" /* basepri = ulMask. */
+ " dsb \n"
+ " isb \n"
+ " bx lr \n" /* Return. */
::: "memory"
);
}
/*-----------------------------------------------------------*/
-void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
-{
- __asm volatile
- (
- " .syntax unified \n"
- " .extern SecureContext_SaveContext \n"
- " .extern SecureContext_LoadContext \n"
- " \n"
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
- " mrs r2, psp \n"/* Read PSP in r2. */
- " \n"
- " cbz r0, save_ns_context \n"/* No secure context to save. */
- " push {r0-r2, r14} \n"
- " bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r0-r3} \n"/* LR is now in r3. */
- " mov lr, r3 \n"/* LR = r3. */
- " lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " \n"
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
- " b select_next_task \n"
- " \n"
- " save_ns_context: \n"
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
+#if ( configENABLE_MPU == 1 )
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " save_s_context: \n"
+ " push {r0-r2, lr} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r2, lr} \n"
+ " \n"
+ " save_ns_context: \n"
+ " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */
+ " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " save_general_regs: \n"
+ " mrs r3, psp \n"
+ " \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
- " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
- " it eq \n"
- " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
+ " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #16 \n"/* r2 = r2 + 16. */
- " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " subs r2, r2, #16 \n"/* r2 = r2 - 16. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #12 \n"/* r2 = r2 + 12. */
- " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " subs r2, r2, #12 \n"/* r2 = r2 - 12. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
- " \n"
- " select_next_task: \n"
- " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
- " msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
- " dsb \n"
- " isb \n"
- " bl vTaskSwitchContext \n"
- " mov r0, #0 \n"/* r0 = 0. */
- " msr basepri, r0 \n"/* Enable interrupts. */
- " \n"
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r3] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r3] \n"/* Program MAIR0. */
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r3] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #8 \n"/* r4 = 8. */
- " str r4, [r3] \n"/* Program RNR = 8. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #12 \n"/* r4 = 12. */
- " str r4, [r3] \n"/* Program RNR = 12. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r3] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r3 \n"/* Restore the CONTROL register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #else /* configENABLE_MPU */
- " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #endif /* configENABLE_MPU */
- " \n"
- " restore_ns_context: \n"
- " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */
+ " \n"
+ " stmia r2!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psp \n" /* r3 = PSP. */
+ " mrs r4, psplim \n" /* r4 = PSPLIM. */
+ " mrs r5, control \n" /* r5 = CONTROL. */
+ " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* No secure context to restore. */
+ " \n"
+ " restore_s_context: \n"
+ " push {r1-r3, lr} \n"
+ " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r1-r3, lr} \n"
+ " \n"
+ " restore_ns_context: \n"
+ " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */
+ " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
- " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
- " it eq \n"
- " vldmiaeq r2!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst: .word pxCurrentTCB \n"
- "xSecureContextConst: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
- ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
- );
-}
+ " \n"
+ " restore_context_done: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xSecureContextConst: .word xSecureContext \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+ }
+
+#else /* configENABLE_MPU */
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ " mrs r2, psp \n" /* Read PSP in r2. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " push {r0-r2, r14} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r3} \n" /* LR is now in r3. */
+ " mov lr, r3 \n" /* LR = r3. */
+ " lsls r1, r3, #25 \n" /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl save_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB.*/
+ " subs r2, r2, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ " str r2, [r1] \n" /* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n" /* r1 = PSPLIM. */
+ " mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
+ " b select_next_task \n"
+ " \n"
+ " save_ns_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " it eq \n"
+ " vstmdbeq r2!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " subs r2, r2, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ " str r2, [r1] \n" /* Save the new top of stack in TCB. */
+ " adds r2, r2, #12 \n" /* r2 = r2 + 12. */
+ " stm r2, {r4-r11} \n" /* Store the registers that are not saved automatically. */
+ " mrs r1, psplim \n" /* r1 = PSPLIM. */
+ " mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
+ " subs r2, r2, #12 \n" /* r2 = r2 - 12. */
+ " stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ " ldr r2, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
+ " \n"
+ " ldmia r2!, {r0, r1, r4} \n" /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ " msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
+ " mov lr, r4 \n" /* LR = r4. */
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r3] \n" /* Restore the task's xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ " push {r2, r4} \n"
+ " bl SecureContext_LoadContext \n" /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r2, r4} \n"
+ " mov lr, r4 \n" /* LR = r4. */
+ " lsls r1, r4, #25 \n" /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " msr psp, r2 \n" /* Remember the new top of stack for the task. */
+ " bx lr \n"
+ " \n"
+ " restore_ns_context: \n"
+ " ldmia r2!, {r4-r11} \n" /* Restore the registers that are not automatically restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " it eq \n"
+ " vldmiaeq r2!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " msr psp, r2 \n" /* Remember the new top of stack for the task. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ "xSecureContextConst: .word xSecureContext \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+ }
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
-void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
-{
- __asm volatile
- (
- " .syntax unified \n"
- " \n"
- " tst lr, #4 \n"
- " ite eq \n"
- " mrseq r0, msp \n"
- " mrsne r0, psp \n"
- " ldr r1, svchandler_address_const \n"
- " bx r1 \n"
- " \n"
- " .align 4 \n"
- "svchandler_address_const: .word vPortSVCHandler_C \n"
- );
-}
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "blt syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ : "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+ }
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " tst lr, #4 \n"
+ " ite eq \n"
+ " mrseq r0, msp \n"
+ " mrsne r0, psp \n"
+ " ldr r1, svchandler_address_const \n"
+ " bx r1 \n"
+ " \n"
+ " .align 4 \n"
+ "svchandler_address_const: .word vPortSVCHandler_C \n"
+ );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " svc %0 \n"/* Secure context is allocated in the supervisor call. */
- " bx lr \n"/* Return. */
+ " .syntax unified \n"
+ " \n"
+ " svc %0 \n" /* Secure context is allocated in the supervisor call. */
+ " bx lr \n" /* Return. */
::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
);
}
@@ -456,14 +594,14 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */
- " ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */
- " cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */
- " it ne \n"
- " svcne %0 \n"/* Secure context is freed in the supervisor call. */
- " bx lr \n"/* Return. */
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, [r0] \n" /* The first item in the TCB is the top of the stack. */
+ " ldr r1, [r2] \n" /* The first item on the stack is the task's xSecureContext. */
+ " cmp r1, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
+ " it ne \n"
+ " svcne %0 \n" /* Secure context is freed in the supervisor call. */
+ " bx lr \n" /* Return. */
::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
);
}
diff --git a/Source/portable/GCC/ARM_CM33/non_secure/portasm.h b/Source/portable/GCC/ARM_CM33/non_secure/portasm.h
index 93606b1..f64ceb5 100644
--- a/Source/portable/GCC/ARM_CM33/non_secure/portasm.h
+++ b/Source/portable/GCC/ARM_CM33/non_secure/portasm.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM33/non_secure/portmacro.h b/Source/portable/GCC/ARM_CM33/non_secure/portmacro.h
index 82f937a..cc79870 100644
--- a/Source/portable/GCC/ARM_CM33/non_secure/portmacro.h
+++ b/Source/portable/GCC/ARM_CM33/non_secure/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -29,11 +29,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
+/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
-
-#include "portmacrocommon.h"
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -49,9 +49,14 @@
* Architecture specifics.
*/
#define portARCH_NAME "Cortex-M33"
+#define portHAS_BASEPRI 1
#define portDONT_DISCARD __attribute__( ( used ) )
/*-----------------------------------------------------------*/
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
/**
* @brief Critical section management.
*/
@@ -59,8 +64,10 @@
#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 )
/*-----------------------------------------------------------*/
+/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h b/Source/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h
index e68692a..6f666da 100644
--- a/Source/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h
+++ b/Source/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -27,11 +27,13 @@
*/
#ifndef PORTMACROCOMMON_H
- #define PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -43,209 +45,329 @@
*------------------------------------------------------------------------------
*/
- #ifndef configENABLE_FPU
- #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
- #endif /* configENABLE_FPU */
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
- #ifndef configENABLE_MPU
- #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
- #endif /* configENABLE_MPU */
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
- #ifndef configENABLE_TRUSTZONE
- #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
- #endif /* configENABLE_TRUSTZONE */
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
- #define portCHAR char
- #define portFLOAT float
- #define portDOUBLE double
- #define portLONG long
- #define portSHORT short
- #define portSTACK_TYPE uint32_t
- #define portBASE_TYPE long
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
- typedef portSTACK_TYPE StackType_t;
- typedef long BaseType_t;
- typedef unsigned long UBaseType_t;
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
- typedef uint16_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffff
- #else
- typedef uint32_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
- #define portTICK_TYPE_IS_ATOMIC 1
- #endif
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
- #define portSTACK_GROWTH ( -1 )
- #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
- #define portBYTE_ALIGNMENT 8
- #define portNOP()
- #define portINLINE __inline
- #ifndef portFORCE_INLINE
- #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
- #endif
- #define portHAS_STACK_OVERFLOW_CHECKING 1
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
* @brief Extern declarations.
*/
- extern BaseType_t xPortIsInsideInterrupt( void );
+extern BaseType_t xPortIsInsideInterrupt( void );
- extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- #if ( configENABLE_TRUSTZONE == 1 )
- extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
- extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
- #endif /* configENABLE_TRUSTZONE */
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
- #if ( configENABLE_MPU == 1 )
- extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
- extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/**
* @brief MPU specific constants.
*/
- #if ( configENABLE_MPU == 1 )
- #define portUSING_MPU_WRAPPERS 1
- #define portPRIVILEGE_BIT ( 0x80000000UL )
- #else
- #define portPRIVILEGE_BIT ( 0x0UL )
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
/* MPU settings that can be overriden in FreeRTOSConfig.h. */
#ifndef configTOTAL_MPU_REGIONS
/* Define to 8 for backward compatibility. */
- #define configTOTAL_MPU_REGIONS ( 8UL )
+ #define configTOTAL_MPU_REGIONS ( 8UL )
#endif
/* MPU regions. */
- #define portPRIVILEGED_FLASH_REGION ( 0UL )
- #define portUNPRIVILEGED_FLASH_REGION ( 1UL )
- #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
- #define portPRIVILEGED_RAM_REGION ( 3UL )
- #define portSTACK_REGION ( 4UL )
- #define portFIRST_CONFIGURABLE_REGION ( 5UL )
- #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
- #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
- #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
- * 01 --> Device-nGnRE
- * 10 --> Device-nGRE
- * 11 --> Device-GRE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
- #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
- #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
- #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
- #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
- #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
- #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
/* Attributes used in MPU_RBAR registers. */
- #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
- #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
- #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
- #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
- #define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
- #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
- #define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
- #define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
typedef struct MPURegionSettings
{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
-/**
- * @brief MPU settings as stored in the TCB.
- */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+    /* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
} xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
* @brief SVC numbers.
*/
- #define portSVC_ALLOCATE_SECURE_CONTEXT 0
- #define portSVC_FREE_SECURE_CONTEXT 1
- #define portSVC_START_SCHEDULER 2
- #define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
- #define portYIELD() vPortYield()
- #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
- #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
- #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 )
- #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/**
* @brief Critical section management.
*/
- #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
- #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
- #define portENTER_CRITICAL() vPortEnterCritical()
- #define portEXIT_CRITICAL() vPortExitCritical()
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/**
* @brief Tickless idle/low power functionality.
*/
- #ifndef portSUPPRESS_TICKS_AND_SLEEP
- extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
- #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
- #endif
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
/*-----------------------------------------------------------*/
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
- #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/*-----------------------------------------------------------*/
- #if ( configENABLE_TRUSTZONE == 1 )
+#if ( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
@@ -256,7 +378,7 @@
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
- #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
@@ -264,18 +386,18 @@
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
- #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
- #endif /* configENABLE_TRUSTZONE */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
- #if ( configENABLE_MPU == 1 )
+#if ( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
- #define portIS_PRIVILEGED() xIsPrivileged()
+ #define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
@@ -284,28 +406,44 @@
* then it raises the privilege. If this is called from any other place,
* the privilege is not raised.
*/
- #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting the bit 0 of the CONTROL
* register.
*/
- #define portRESET_PRIVILEGE() vResetPrivilege()
- #else
- #define portIS_PRIVILEGED()
- #define portRAISE_PRIVILEGE()
- #define portRESET_PRIVILEGE()
- #endif /* configENABLE_MPU */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
* @brief Barriers.
*/
- #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
/*-----------------------------------------------------------*/
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/GCC/ARM_CM33/secure/secure_context.c b/Source/portable/GCC/ARM_CM33/secure/secure_context.c
index 1996693..e37dd96 100644
--- a/Source/portable/GCC/ARM_CM33/secure/secure_context.c
+++ b/Source/portable/GCC/ARM_CM33/secure/secure_context.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM33/secure/secure_context.h b/Source/portable/GCC/ARM_CM33/secure/secure_context.h
index de33d15..2220ea6 100644
--- a/Source/portable/GCC/ARM_CM33/secure/secure_context.h
+++ b/Source/portable/GCC/ARM_CM33/secure/secure_context.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM33/secure/secure_context_port.c b/Source/portable/GCC/ARM_CM33/secure/secure_context_port.c
index 952db8a..d70822c 100644
--- a/Source/portable/GCC/ARM_CM33/secure/secure_context_port.c
+++ b/Source/portable/GCC/ARM_CM33/secure/secure_context_port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM33/secure/secure_heap.c b/Source/portable/GCC/ARM_CM33/secure/secure_heap.c
index b3bf007..19f7c23 100644
--- a/Source/portable/GCC/ARM_CM33/secure/secure_heap.c
+++ b/Source/portable/GCC/ARM_CM33/secure/secure_heap.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM33/secure/secure_heap.h b/Source/portable/GCC/ARM_CM33/secure/secure_heap.h
index e469f2c..75c9cb0 100644
--- a/Source/portable/GCC/ARM_CM33/secure/secure_heap.h
+++ b/Source/portable/GCC/ARM_CM33/secure/secure_heap.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM33/secure/secure_init.c b/Source/portable/GCC/ARM_CM33/secure/secure_init.c
index f6570d8..f93bfce 100644
--- a/Source/portable/GCC/ARM_CM33/secure/secure_init.c
+++ b/Source/portable/GCC/ARM_CM33/secure/secure_init.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM33/secure/secure_init.h b/Source/portable/GCC/ARM_CM33/secure/secure_init.h
index e89af71..e6c9da0 100644
--- a/Source/portable/GCC/ARM_CM33/secure/secure_init.h
+++ b/Source/portable/GCC/ARM_CM33/secure/secure_init.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM33/secure/secure_port_macros.h b/Source/portable/GCC/ARM_CM33/secure/secure_port_macros.h
index 2fb7c59..d7ac583 100644
--- a/Source/portable/GCC/ARM_CM33/secure/secure_port_macros.h
+++ b/Source/portable/GCC/ARM_CM33/secure/secure_port_macros.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c b/Source/portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..d247c92
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2106 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+#include "mpu_syscall_numbers.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskDelayUntil ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskAbortDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskPriorityGet ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_eTaskGetState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskGetInfo ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetIdleTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSuspend ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskResume ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ TickType_t MPU_xTaskGetTickCount( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetTickCount ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetNumberOfTasks ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTaskGetName ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+        configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_ulTaskGetIdleRunTimeCounterImpl     \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv      \n"
+                " MPU_ulTaskGetIdleRunTimeCounter_Priv:           \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_ulTaskGetIdleRunTimeCounterImpl       \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_ulTaskGetIdleRunTimeCounter_Unpriv:         \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimeCounter ) : "memory"
+            );
+        }
+
+    #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+        void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+                                             TaskHookFunction_t pxHookFunction ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+                                             TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_vTaskSetApplicationTaskTagImpl      \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_vTaskSetApplicationTaskTag_Unpriv       \n"
+                " MPU_vTaskSetApplicationTaskTag_Priv:            \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_vTaskSetApplicationTaskTagImpl        \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_vTaskSetApplicationTaskTag_Unpriv:          \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_vTaskSetApplicationTaskTag ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+        TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xTaskGetApplicationTaskTagImpl      \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xTaskGetApplicationTaskTag_Unpriv       \n"
+                " MPU_xTaskGetApplicationTaskTag_Priv:            \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xTaskGetApplicationTaskTagImpl        \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xTaskGetApplicationTaskTag_Unpriv:          \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xTaskGetApplicationTaskTag ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+        void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+                                                    BaseType_t xIndex,
+                                                    void * pvValue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+                                                    BaseType_t xIndex,
+                                                    void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                   \n"
+                " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+                "                                                   \n"
+                " push {r0}                                         \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                   \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                        \n"
+                " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv  \n"
+                " MPU_vTaskSetThreadLocalStoragePointer_Priv:       \n"
+                "     pop {r0}                                      \n"
+                "     b MPU_vTaskSetThreadLocalStoragePointerImpl   \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_vTaskSetThreadLocalStoragePointer_Unpriv:     \n"
+                "     pop {r0}                                      \n"
+                "     svc %0                                        \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                   \n"
+                : : "i" ( SYSTEM_CALL_vTaskSetThreadLocalStoragePointer ) : "memory"
+            );
+        }
+
+    #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+        void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+                                                       BaseType_t xIndex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+                                                       BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                   \n"
+                " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+                "                                                   \n"
+                " push {r0}                                         \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                   \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                        \n"
+                " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+                " MPU_pvTaskGetThreadLocalStoragePointer_Priv:      \n"
+                "     pop {r0}                                      \n"
+                "     b MPU_pvTaskGetThreadLocalStoragePointerImpl  \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:    \n"
+                "     pop {r0}                                      \n"
+                "     svc %0                                        \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                   \n"
+                : : "i" ( SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer ) : "memory"
+            );
+        }
+
+    #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TRACE_FACILITY == 1 )
+
+        UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+                                              const UBaseType_t uxArraySize,
+                                              configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+                                              const UBaseType_t uxArraySize,
+                                              configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_uxTaskGetSystemStateImpl            \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_uxTaskGetSystemState_Unpriv             \n"
+                " MPU_uxTaskGetSystemState_Priv:                  \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_uxTaskGetSystemStateImpl              \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_uxTaskGetSystemState_Unpriv:                \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_uxTaskGetSystemState ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+        UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_uxTaskGetStackHighWaterMarkImpl     \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_uxTaskGetStackHighWaterMark_Unpriv      \n"
+                " MPU_uxTaskGetStackHighWaterMark_Priv:           \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_uxTaskGetStackHighWaterMarkImpl       \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_uxTaskGetStackHighWaterMark_Unpriv:         \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark ) : "memory"
+            );
+        }
+
+    #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+        configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_uxTaskGetStackHighWaterMark2Impl    \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv     \n"
+                " MPU_uxTaskGetStackHighWaterMark2_Priv:          \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_uxTaskGetStackHighWaterMark2Impl      \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_uxTaskGetStackHighWaterMark2_Unpriv:        \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark2 ) : "memory"
+            );
+        }
+
+    #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+        TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xTaskGetCurrentTaskHandleImpl       \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xTaskGetCurrentTaskHandle_Unpriv        \n"
+                " MPU_xTaskGetCurrentTaskHandle_Priv:             \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xTaskGetCurrentTaskHandleImpl         \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xTaskGetCurrentTaskHandle_Unpriv:           \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xTaskGetCurrentTaskHandle ) : "memory"
+            );
+        }
+
+    #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+        BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xTaskGetSchedulerStateImpl          \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xTaskGetSchedulerState_Unpriv           \n"
+                " MPU_xTaskGetSchedulerState_Priv:                \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xTaskGetSchedulerStateImpl            \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xTaskGetSchedulerState_Unpriv:              \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xTaskGetSchedulerState ) : "memory"
+            );
+        }
+
+    #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+    void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+    void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+    {
+        __asm volatile
+        (
+            " .syntax unified                                 \n"
+            " .extern MPU_vTaskSetTimeOutStateImpl            \n"
+            "                                                 \n"
+            " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+            " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+            " tst r0, #1                                      \n"
+            " bne MPU_vTaskSetTimeOutState_Unpriv             \n"
+            " MPU_vTaskSetTimeOutState_Priv:                  \n"
+            "     pop {r0}                                    \n"
+            "     b MPU_vTaskSetTimeOutStateImpl              \n" /* Privileged: tail-call the implementation directly. */
+            " MPU_vTaskSetTimeOutState_Unpriv:                \n"
+            "     pop {r0}                                    \n"
+            "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+            "                                                 \n"
+            : : "i" ( SYSTEM_CALL_vTaskSetTimeOutState ) : "memory"
+        );
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+                                         TickType_t * const pxTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+    BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+                                         TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+    {
+        __asm volatile
+        (
+            " .syntax unified                                 \n"
+            " .extern MPU_xTaskCheckForTimeOutImpl            \n"
+            "                                                 \n"
+            " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+            " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+            " tst r0, #1                                      \n"
+            " bne MPU_xTaskCheckForTimeOut_Unpriv             \n"
+            " MPU_xTaskCheckForTimeOut_Priv:                  \n"
+            "     pop {r0}                                    \n"
+            "     b MPU_xTaskCheckForTimeOutImpl              \n" /* Privileged: tail-call the implementation directly. */
+            " MPU_xTaskCheckForTimeOut_Unpriv:                \n"
+            "     pop {r0}                                    \n"
+            "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+            "                                                 \n"
+            : : "i" ( SYSTEM_CALL_xTaskCheckForTimeOut ) : "memory"
+        );
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+        BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xTaskGenericNotifyImpl              \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (params struct pointer) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xTaskGenericNotify_Unpriv               \n"
+                " MPU_xTaskGenericNotify_Priv:                    \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xTaskGenericNotifyImpl                \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xTaskGenericNotify_Unpriv:                  \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xTaskGenericNotify ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+        BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xTaskGenericNotifyWaitImpl          \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (params struct pointer) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xTaskGenericNotifyWait_Unpriv           \n"
+                " MPU_xTaskGenericNotifyWait_Priv:                \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xTaskGenericNotifyWaitImpl            \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xTaskGenericNotifyWait_Unpriv:              \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xTaskGenericNotifyWait ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+        uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+                                              BaseType_t xClearCountOnExit,
+                                              TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+                                              BaseType_t xClearCountOnExit,
+                                              TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_ulTaskGenericNotifyTakeImpl         \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_ulTaskGenericNotifyTake_Unpriv          \n"
+                " MPU_ulTaskGenericNotifyTake_Priv:               \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_ulTaskGenericNotifyTakeImpl           \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_ulTaskGenericNotifyTake_Unpriv:             \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyTake ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+        BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+                                                     UBaseType_t uxIndexToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+                                                     UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xTaskGenericNotifyStateClearImpl    \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xTaskGenericNotifyStateClear_Unpriv     \n"
+                " MPU_xTaskGenericNotifyStateClear_Priv:          \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xTaskGenericNotifyStateClearImpl      \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xTaskGenericNotifyStateClear_Unpriv:        \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xTaskGenericNotifyStateClear ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+        uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+                                                    UBaseType_t uxIndexToClear,
+                                                    uint32_t ulBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+                                                    UBaseType_t uxIndexToClear,
+                                                    uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_ulTaskGenericNotifyValueClearImpl   \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_ulTaskGenericNotifyValueClear_Unpriv    \n"
+                " MPU_ulTaskGenericNotifyValueClear_Priv:         \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_ulTaskGenericNotifyValueClearImpl     \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_ulTaskGenericNotifyValueClear_Unpriv:       \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyValueClear ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+                                      const void * const pvItemToQueue,
+                                      TickType_t xTicksToWait,
+                                      const BaseType_t xCopyPosition ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+    BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+                                      const void * const pvItemToQueue,
+                                      TickType_t xTicksToWait,
+                                      const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+    {
+        __asm volatile
+        (
+            " .syntax unified                                 \n"
+            " .extern MPU_xQueueGenericSendImpl               \n"
+            "                                                 \n"
+            " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+            " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+            " tst r0, #1                                      \n"
+            " bne MPU_xQueueGenericSend_Unpriv                \n"
+            " MPU_xQueueGenericSend_Priv:                     \n"
+            "     pop {r0}                                    \n"
+            "     b MPU_xQueueGenericSendImpl                 \n" /* Privileged: tail-call the implementation directly. */
+            " MPU_xQueueGenericSend_Unpriv:                   \n"
+            "     pop {r0}                                    \n"
+            "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+            "                                                 \n"
+            : : "i" ( SYSTEM_CALL_xQueueGenericSend ) : "memory"
+        );
+    }
+/*-----------------------------------------------------------*/
+
+    UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+    UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+    {
+        __asm volatile
+        (
+            " .syntax unified                                 \n"
+            " .extern MPU_uxQueueMessagesWaitingImpl          \n"
+            "                                                 \n"
+            " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+            " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+            " tst r0, #1                                      \n"
+            " bne MPU_uxQueueMessagesWaiting_Unpriv           \n"
+            " MPU_uxQueueMessagesWaiting_Priv:                \n"
+            "     pop {r0}                                    \n"
+            "     b MPU_uxQueueMessagesWaitingImpl            \n" /* Privileged: tail-call the implementation directly. */
+            " MPU_uxQueueMessagesWaiting_Unpriv:              \n"
+            "     pop {r0}                                    \n"
+            "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+            "                                                 \n"
+            : : "i" ( SYSTEM_CALL_uxQueueMessagesWaiting ) : "memory"
+        );
+    }
+/*-----------------------------------------------------------*/
+
+    UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+    UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+    {
+        __asm volatile
+        (
+            " .syntax unified                                 \n"
+            " .extern MPU_uxQueueSpacesAvailableImpl          \n"
+            "                                                 \n"
+            " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+            " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+            " tst r0, #1                                      \n"
+            " bne MPU_uxQueueSpacesAvailable_Unpriv           \n"
+            " MPU_uxQueueSpacesAvailable_Priv:                \n"
+            "     pop {r0}                                    \n"
+            "     b MPU_uxQueueSpacesAvailableImpl            \n" /* Privileged: tail-call the implementation directly. */
+            " MPU_uxQueueSpacesAvailable_Unpriv:              \n"
+            "     pop {r0}                                    \n"
+            "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+            "                                                 \n"
+            : : "i" ( SYSTEM_CALL_uxQueueSpacesAvailable ) : "memory"
+        );
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+                                  void * const pvBuffer,
+                                  TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+    BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+                                  void * const pvBuffer,
+                                  TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+    {
+        __asm volatile
+        (
+            " .syntax unified                                 \n"
+            " .extern MPU_xQueueReceiveImpl                   \n"
+            "                                                 \n"
+            " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+            " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+            " tst r0, #1                                      \n"
+            " bne MPU_xQueueReceive_Unpriv                    \n"
+            " MPU_xQueueReceive_Priv:                         \n"
+            "     pop {r0}                                    \n"
+            "     b MPU_xQueueReceiveImpl                     \n" /* Privileged: tail-call the implementation directly. */
+            " MPU_xQueueReceive_Unpriv:                       \n"
+            "     pop {r0}                                    \n"
+            "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+            "                                                 \n"
+            : : "i" ( SYSTEM_CALL_xQueueReceive ) : "memory"
+        );
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+                               void * const pvBuffer,
+                               TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+    BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+                               void * const pvBuffer,
+                               TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+    {
+        __asm volatile
+        (
+            " .syntax unified                                 \n"
+            " .extern MPU_xQueuePeekImpl                      \n"
+            "                                                 \n"
+            " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+            " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+            " tst r0, #1                                      \n"
+            " bne MPU_xQueuePeek_Unpriv                       \n"
+            " MPU_xQueuePeek_Priv:                            \n"
+            "     pop {r0}                                    \n"
+            "     b MPU_xQueuePeekImpl                        \n" /* Privileged: tail-call the implementation directly. */
+            " MPU_xQueuePeek_Unpriv:                          \n"
+            "     pop {r0}                                    \n"
+            "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+            "                                                 \n"
+            : : "i" ( SYSTEM_CALL_xQueuePeek ) : "memory"
+        );
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+                                        TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+    BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+                                        TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+    {
+        __asm volatile
+        (
+            " .syntax unified                                 \n"
+            " .extern MPU_xQueueSemaphoreTakeImpl             \n"
+            "                                                 \n"
+            " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+            " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+            " tst r0, #1                                      \n"
+            " bne MPU_xQueueSemaphoreTake_Unpriv              \n"
+            " MPU_xQueueSemaphoreTake_Priv:                   \n"
+            "     pop {r0}                                    \n"
+            "     b MPU_xQueueSemaphoreTakeImpl               \n" /* Privileged: tail-call the implementation directly. */
+            " MPU_xQueueSemaphoreTake_Unpriv:                 \n"
+            "     pop {r0}                                    \n"
+            "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+            "                                                 \n"
+            : : "i" ( SYSTEM_CALL_xQueueSemaphoreTake ) : "memory"
+        );
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+        TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xQueueGetMutexHolderImpl            \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xQueueGetMutexHolder_Unpriv             \n"
+                " MPU_xQueueGetMutexHolder_Priv:                  \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xQueueGetMutexHolderImpl              \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xQueueGetMutexHolder_Unpriv:                \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xQueueGetMutexHolder ) : "memory"
+            );
+        }
+
+    #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+        BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+                                                 TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+                                                 TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xQueueTakeMutexRecursiveImpl        \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xQueueTakeMutexRecursive_Unpriv         \n"
+                " MPU_xQueueTakeMutexRecursive_Priv:              \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xQueueTakeMutexRecursiveImpl          \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xQueueTakeMutexRecursive_Unpriv:            \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xQueueTakeMutexRecursive ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+        BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xQueueGiveMutexRecursiveImpl        \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xQueueGiveMutexRecursive_Unpriv         \n"
+                " MPU_xQueueGiveMutexRecursive_Priv:              \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xQueueGiveMutexRecursiveImpl          \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xQueueGiveMutexRecursive_Unpriv:            \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xQueueGiveMutexRecursive ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_QUEUE_SETS == 1 )
+
+        QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                        const TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                        const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xQueueSelectFromSetImpl             \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xQueueSelectFromSet_Unpriv              \n"
+                " MPU_xQueueSelectFromSet_Priv:                   \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xQueueSelectFromSetImpl               \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xQueueSelectFromSet_Unpriv:                 \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xQueueSelectFromSet ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_QUEUE_SETS == 1 )
+
+        BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                                       QueueSetHandle_t xQueueSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                                       QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xQueueAddToSetImpl                  \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xQueueAddToSet_Unpriv                   \n"
+                " MPU_xQueueAddToSet_Priv:                        \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xQueueAddToSetImpl                    \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xQueueAddToSet_Unpriv:                      \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xQueueAddToSet ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+        void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                                      const char * pcName ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                                      const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_vQueueAddToRegistryImpl             \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_vQueueAddToRegistry_Unpriv              \n"
+                " MPU_vQueueAddToRegistry_Priv:                   \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_vQueueAddToRegistryImpl               \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_vQueueAddToRegistry_Unpriv:                 \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_vQueueAddToRegistry ) : "memory"
+            );
+        }
+
+    #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+        void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_vQueueUnregisterQueueImpl           \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_vQueueUnregisterQueue_Unpriv            \n"
+                " MPU_vQueueUnregisterQueue_Priv:                 \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_vQueueUnregisterQueueImpl             \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_vQueueUnregisterQueue_Unpriv:               \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_vQueueUnregisterQueue ) : "memory"
+            );
+        }
+
+    #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+        const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_pcQueueGetNameImpl                  \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_pcQueueGetName_Unpriv                   \n"
+                " MPU_pcQueueGetName_Priv:                        \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_pcQueueGetNameImpl                    \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_pcQueueGetName_Unpriv:                      \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_pcQueueGetName ) : "memory"
+            );
+        }
+
+    #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_pvTimerGetTimerIDImpl               \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_pvTimerGetTimerID_Unpriv                \n"
+                " MPU_pvTimerGetTimerID_Priv:                     \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_pvTimerGetTimerIDImpl                 \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_pvTimerGetTimerID_Unpriv:                   \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_pvTimerGetTimerID ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                                   void * pvNewID ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                                   void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_vTimerSetTimerIDImpl                \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_vTimerSetTimerID_Unpriv                 \n"
+                " MPU_vTimerSetTimerID_Priv:                      \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_vTimerSetTimerIDImpl                  \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_vTimerSetTimerID_Unpriv:                    \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_vTimerSetTimerID ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xTimerIsTimerActiveImpl             \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (first argument) while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xTimerIsTimerActive_Unpriv              \n"
+                " MPU_xTimerIsTimerActive_Priv:                   \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xTimerIsTimerActiveImpl               \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xTimerIsTimerActive_Unpriv:                 \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xTimerIsTimerActive ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl  \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 while it is used as scratch. */
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) set => caller is unprivileged. */
+                " tst r0, #1                                      \n"
+                " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv   \n"
+                " MPU_xTimerGetTimerDaemonTaskHandle_Priv:        \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xTimerGetTimerDaemonTaskHandleImpl    \n" /* Privileged: tail-call the implementation directly. */
+                " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:      \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TIMERS == 1 )
+
+        BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+        BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+        {
+            __asm volatile
+            (
+                " .syntax unified                                 \n"
+                " .extern MPU_xTimerGenericCommandPrivImpl        \n"
+                "                                                 \n"
+                " push {r0}                                       \n" /* Preserve r0 (params struct pointer) while it is used as scratch. */
+                " mrs r0, ipsr                                    \n" /* IPSR != 0 => running in an exception handler, which is always privileged. */
+                " cmp r0, #0                                      \n"
+                " bne MPU_xTimerGenericCommand_Priv               \n"
+                " mrs r0, control                                 \n" /* CONTROL bit 0 (nPRIV) clear => thread-mode caller is privileged. */
+                " tst r0, #1                                      \n"
+                " beq MPU_xTimerGenericCommand_Priv               \n"
+                " MPU_xTimerGenericCommand_Unpriv:                \n"
+                "     pop {r0}                                    \n"
+                "     svc %0                                      \n" /* Unprivileged: enter the SVC handler with this API's system call number. */
+                " MPU_xTimerGenericCommand_Priv:                  \n"
+                "     pop {r0}                                    \n"
+                "     b MPU_xTimerGenericCommandPrivImpl          \n" /* Privileged (or ISR) path: tail-call the privileged implementation. */
+                "                                                 \n"
+                "                                                 \n"
+                : : "i" ( SYSTEM_CALL_xTimerGenericCommand ) : "memory"
+            );
+        }
+
+    #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTimerGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetPeriod ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetExpiryTime ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupWaitBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupClearBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSetBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSync ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxEventGroupGetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vEventGroupSetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsFull ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsEmpty ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferBytesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSetTriggerLevel ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferNextMessageLengthBytes ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/Source/portable/GCC/ARM_CM33_NTZ/non_secure/port.c b/Source/portable/GCC/ARM_CM33_NTZ/non_secure/port.c
index 349aeff..9712ac3 100644
--- a/Source/portable/GCC/ARM_CM33_NTZ/non_secure/port.c
+++ b/Source/portable/GCC/ARM_CM33_NTZ/non_secure/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
-/* MPU wrappers includes. */
+/* MPU includes. */
#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@@ -95,6 +96,26 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -111,6 +132,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -135,6 +164,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -178,6 +209,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -299,6 +354,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -352,8 +420,67 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
/**
* @brief Each task maintains its own interrupt status in the critical nesting
* variable.
@@ -369,6 +496,19 @@
PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
#endif /* configENABLE_TRUSTZONE */
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
#if ( configUSE_TICKLESS_IDLE == 1 )
/**
@@ -656,10 +796,29 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@@ -827,9 +986,8 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@@ -839,7 +997,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -854,7 +1012,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -925,18 +1083,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -944,131 +1102,546 @@
}
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                        " vpop {s0}         \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Store the value of the PSPLIM register before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the privileged code (i.e. application is not
+ * raising SVC directly). This SVC is only raised from
+ * vRequestSystemCallExit which is in the privileged code section.
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+ * this means that we previously entered a system call and the
+ * application is not attempting to exit without entering a system
+ * call.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+ {
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                        " vpop {s0}         \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Return to the caller of the System Call entry point (i.e. the
+ * caller of the MPU_<API>). */
+ pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ /* Ensure that LR has a valid value.*/
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* Restore the PSPLIM register to what it was at the time of
+ * system call entry. */
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
+
+ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+ }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+ }
+
+#else /* configENABLE_MPU */
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
+ {
+ /* Simulate the stack frame as it would be created by a context switch
+ * interrupt. */
+ #if ( portPRELOAD_REGISTERS == 0 )
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
+ *pxTopOfStack = portINITIAL_EXC_RETURN;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #else /* portPRELOAD_REGISTERS */
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+ pxTopOfStack--;
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #endif /* portPRELOAD_REGISTERS */
+
+ return pxTopOfStack;
+ }
+
#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
-{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- #if ( portPRELOAD_REGISTERS == 0 )
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #else /* portPRELOAD_REGISTERS */
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #endif /* portPRELOAD_REGISTERS */
-
- return pxTopOfStack;
-}
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
{
+ #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+ {
+ volatile uint32_t ulOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine the maximum priority from which ISR safe FreeRTOS API
+ * functions can be called. ISR safe functions are those that end in
+ * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+ * ensure interrupt entry is as fast and simple as possible.
+ *
+ * Save the interrupt priority value that is about to be clobbered. */
+ ulOriginalPriority = portNVIC_SHPR2_REG;
+
+ /* Determine the number of priority bits available. First write to all
+ * possible bits. */
+ portNVIC_SHPR2_REG = 0xFF000000;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+ /* Use the same mask on the maximum system call priority. */
+ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+ /* Calculate the maximum acceptable priority group value for the number
+ * of bits read back. */
+
+ while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+ {
+ ulImplementedPrioBits++;
+ ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+ }
+
+ if( ulImplementedPrioBits == 8 )
+ {
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
+ }
+ else
+ {
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+ }
+
+ /* Shift the priority group value back to its position within the AIRCR
+ * register. */
+ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+ ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+ /* Restore the clobbered interrupt priority register to its original
+ * value. */
+ portNVIC_SHPR2_REG = ulOriginalPriority;
+ }
+ #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
/* Make PendSV, CallSV and SysTick the same priority as the kernel. */
portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
@@ -1087,6 +1660,12 @@
/* Initialize the critical nesting count ready for the first task. */
ulCriticalNesting = 0;
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+ {
+ xSchedulerRunning = pdTRUE;
+ }
+ #endif
+
/* Start the first task. */
vStartFirstTask();
@@ -1122,7 +1701,6 @@
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;
@@ -1237,6 +1815,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
@@ -1259,3 +1885,159 @@
return xReturn;
}
/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ void vPortValidateInterruptPriority( void )
+ {
+ uint32_t ulCurrentInterrupt;
+ uint8_t ucCurrentPriority;
+
+ /* Obtain the number of the currently executing interrupt. */
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+ /* Is the interrupt number a user defined interrupt? */
+ if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+ {
+ /* Look up the interrupt's priority. */
+ ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+ /* The following assertion will fail if a service routine (ISR) for
+ * an interrupt that has been assigned a priority above
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+ * function. ISR safe FreeRTOS API functions must *only* be called
+ * from interrupts that have been assigned a priority at or below
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Numerically low interrupt priority numbers represent logically high
+ * interrupt priorities, therefore the priority of the interrupt must
+ * be set to a value equal to or numerically *higher* than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
+ * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+ * and therefore also guaranteed to be invalid.
+ *
+ * FreeRTOS maintains separate thread and ISR API functions to ensure
+ * interrupt entry is as fast and simple as possible.
+ *
+ * The following links provide detailed information:
+ * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+ * https://www.FreeRTOS.org/FAQHelp.html */
+ configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+ }
+
+ /* Priority grouping: The interrupt controller (NVIC) allows the bits
+ * that define each interrupt's priority to be split between bits that
+ * define the interrupt's pre-emption priority bits and bits that define
+ * the interrupt's sub-priority. For simplicity all bits must be defined
+ * to be pre-emption priority bits. The following assertion will fail if
+ * this is not the case (if some bits represent a sub-priority).
+ *
+ * If the application only uses CMSIS libraries for interrupt
+ * configuration then the correct setting can be achieved on all Cortex-M
+ * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+ * scheduler. Note however that some vendor specific peripheral libraries
+ * assume a non-zero priority group setting, in which cases using a value
+ * of zero will result in unpredictable behaviour. */
+ configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+ }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings;
+
+ if( xSchedulerRunning == pdFALSE )
+ {
+ /* Grant access to all the kernel objects before the scheduler
+ * is started. It is necessary because there is no task running
+ * yet and therefore, we cannot use the permissions of any
+ * task. */
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+
+ #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ ( void ) lInternalIndexOfKernelObject;
+
+ /* If Access Control List feature is not used, all the tasks have
+ * access to all the kernel objects. */
+ return pdTRUE;
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c b/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c
index 3a97911..b3f6a0a 100644
--- a/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c
+++ b/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -36,110 +36,138 @@
/* Portasm includes. */
#include "portasm.h"
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
+
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
-void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
-{
- __asm volatile
- (
- " .syntax unified \n"
- " \n"
- " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r2] \n"/* Read pxCurrentTCB. */
- " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r3, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #4 \n"/* r3 = 4. */
- " str r3, [r2] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #8 \n"/* r3 = 8. */
- " str r3, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #12 \n"/* r3 = 12. */
- " str r3, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " msr control, r2 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r2 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst2: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
- );
-}
+#if ( configENABLE_MPU == 1 )
+
+ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+ }
+
+#else /* configENABLE_MPU */
+
+ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ " \n"
+ " ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ " msr psplim, r1 \n" /* Set this task's PSPLIM value. */
+ " movs r1, #2 \n" /* r1 = 2. */
+ " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n" /* Discard everything up to r0. */
+ " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx r2 \n" /* Finally, branch to EXC_RETURN. */
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ );
+ }
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, control \n"/* r0 = CONTROL. */
- " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
- " ite ne \n"
- " movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
- " moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
- " bx lr \n"/* Return. */
- " \n"
- " .align 4 \n"
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+ " ite ne \n"
+ " movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ " moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ " bx lr \n" /* Return. */
+ " \n"
+ " .align 4 \n"
::: "r0", "memory"
);
}
@@ -149,12 +177,12 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, control \n"/* Read the CONTROL register. */
- " bic r0, #1 \n"/* Clear the bit 0. */
- " msr control, r0 \n"/* Write back the new CONTROL value. */
- " bx lr \n"/* Return to the caller. */
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* Read the CONTROL register. */
+ " bic r0, #1 \n" /* Clear the bit 0. */
+ " msr control, r0 \n" /* Write back the new CONTROL value. */
+ " bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@@ -164,12 +192,12 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, control \n"/* r0 = CONTROL. */
- " orr r0, #1 \n"/* r0 = r0 | 1. */
- " msr control, r0 \n"/* CONTROL = r0. */
- " bx lr \n"/* Return to the caller. */
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " orr r0, #1 \n" /* r0 = r0 | 1. */
+ " msr control, r0 \n" /* CONTROL = r0. */
+ " bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@@ -179,21 +207,21 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
- " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
- " msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
- " cpsie i \n"/* Globally enable interrupts. */
- " cpsie f \n"
- " dsb \n"
- " isb \n"
- " svc %0 \n"/* System call to start the first task. */
- " nop \n"
- " \n"
- " .align 4 \n"
- "xVTORConst: .word 0xe000ed08 \n"
+ " .syntax unified \n"
+ " \n"
+ " ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
+ " ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
+ " msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
+ " cpsie i \n" /* Globally enable interrupts. */
+ " cpsie f \n"
+ " dsb \n"
+ " isb \n"
+ " svc %0 \n" /* System call to start the first task. */
+ " nop \n"
+ " \n"
+ " .align 4 \n"
+ "xVTORConst: .word 0xe000ed08 \n"
::"i" ( portSVC_START_SCHEDULER ) : "memory"
);
}
@@ -203,14 +231,14 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */
- " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
- " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
- " dsb \n"
- " isb \n"
- " bx lr \n"/* Return. */
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
+ " mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " msr basepri, r1 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bx lr \n" /* Return. */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@@ -220,146 +248,252 @@
{
__asm volatile
(
- " .syntax unified \n"
- " \n"
- " msr basepri, r0 \n"/* basepri = ulMask. */
- " dsb \n"
- " isb \n"
- " bx lr \n"/* Return. */
+ " .syntax unified \n"
+ " \n"
+ " msr basepri, r0 \n" /* basepri = ulMask. */
+ " dsb \n"
+ " isb \n"
+ " bx lr \n" /* Return. */
::: "memory"
);
}
/*-----------------------------------------------------------*/
-void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
-{
- __asm volatile
- (
- " .syntax unified \n"
- " \n"
- " mrs r0, psp \n"/* Read PSP in r0. */
+#if ( configENABLE_MPU == 1 )
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " \n"
+ " save_general_regs: \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
- " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
- " it eq \n"
- " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
+ " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r2, control \n"/* r2 = CONTROL. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
- #else /* configENABLE_MPU */
- " mrs r2, psplim \n"/* r2 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
- #endif /* configENABLE_MPU */
- " \n"
- " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r2] \n"/* Read pxCurrentTCB. */
- " str r0, [r1] \n"/* Save the new top of stack in TCB. */
- " \n"
- " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
- " msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
- " dsb \n"
- " isb \n"
- " bl vTaskSwitchContext \n"
- " mov r0, #0 \n"/* r0 = 0. */
- " msr basepri, r0 \n"/* Enable interrupts. */
- " \n"
- " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r2] \n"/* Read pxCurrentTCB. */
- " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */
- " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r3, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #4 \n"/* r3 = 4. */
- " str r3, [r2] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #8 \n"/* r3 = 8. */
- " str r3, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #12 \n"/* r3 = 12. */
- " str r3, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
- #else /* configENABLE_MPU */
- " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
- #endif /* configENABLE_MPU */
- " \n"
+ " \n"
+ " stmia r1!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psplim \n" /* r3 = PSPLIM. */
+ " mrs r4, control \n" /* r4 = CONTROL. */
+ " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
- " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
- " it eq \n"
- " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r2 \n"/* Restore the CONTROL register value for the task. */
- #else /* configENABLE_MPU */
- " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
- #endif /* configENABLE_MPU */
- " msr psp, r0 \n"/* Remember the new top of stack for the task. */
- " bx r3 \n"
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
- ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
- );
-}
+ " \n"
+ " restore_context_done: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+ }
+
+#else /* configENABLE_MPU */
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, psp \n" /* Read PSP in r0. */
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " it eq \n"
+ " vstmdbeq r0!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " mrs r2, psplim \n" /* r2 = PSPLIM. */
+ " mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
+ " stmdb r0!, {r2-r11} \n" /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r2] \n" /* Read pxCurrentTCB. */
+ " str r0, [r1] \n" /* Save the new top of stack in TCB. */
+ " \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
+ " \n"
+ " ldmia r0!, {r2-r11} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst r3, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " it eq \n"
+ " vldmiaeq r0!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
+ " msr psp, r0 \n" /* Remember the new top of stack for the task. */
+ " bx r3 \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+ }
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
-void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
-{
- __asm volatile
- (
- " .syntax unified \n"
- " \n"
- " tst lr, #4 \n"
- " ite eq \n"
- " mrseq r0, msp \n"
- " mrsne r0, psp \n"
- " ldr r1, svchandler_address_const \n"
- " bx r1 \n"
- " \n"
- " .align 4 \n"
- "svchandler_address_const: .word vPortSVCHandler_C \n"
- );
-}
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "blt syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ : "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+ }
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " tst lr, #4 \n"
+ " ite eq \n"
+ " mrseq r0, msp \n"
+ " mrsne r0, psp \n"
+ " ldr r1, svchandler_address_const \n"
+ " bx r1 \n"
+ " \n"
+ " .align 4 \n"
+ "svchandler_address_const: .word vPortSVCHandler_C \n"
+ );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.h b/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.h
index 93606b1..f64ceb5 100644
--- a/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.h
+++ b/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h b/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h
index 82f937a..cc79870 100644
--- a/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h
+++ b/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -29,11 +29,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
+/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
-
-#include "portmacrocommon.h"
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -49,9 +49,14 @@
* Architecture specifics.
*/
#define portARCH_NAME "Cortex-M33"
+#define portHAS_BASEPRI 1
#define portDONT_DISCARD __attribute__( ( used ) )
/*-----------------------------------------------------------*/
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
/**
* @brief Critical section management.
*/
@@ -59,8 +64,10 @@
#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 )
/*-----------------------------------------------------------*/
+/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h
index e68692a..6f666da 100644
--- a/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h
+++ b/Source/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -27,11 +27,13 @@
*/
#ifndef PORTMACROCOMMON_H
- #define PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -43,209 +45,329 @@
*------------------------------------------------------------------------------
*/
- #ifndef configENABLE_FPU
- #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
- #endif /* configENABLE_FPU */
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
- #ifndef configENABLE_MPU
- #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
- #endif /* configENABLE_MPU */
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
- #ifndef configENABLE_TRUSTZONE
- #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
- #endif /* configENABLE_TRUSTZONE */
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
- #define portCHAR char
- #define portFLOAT float
- #define portDOUBLE double
- #define portLONG long
- #define portSHORT short
- #define portSTACK_TYPE uint32_t
- #define portBASE_TYPE long
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
- typedef portSTACK_TYPE StackType_t;
- typedef long BaseType_t;
- typedef unsigned long UBaseType_t;
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
- typedef uint16_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffff
- #else
- typedef uint32_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
- #define portTICK_TYPE_IS_ATOMIC 1
- #endif
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
- #define portSTACK_GROWTH ( -1 )
- #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
- #define portBYTE_ALIGNMENT 8
- #define portNOP()
- #define portINLINE __inline
- #ifndef portFORCE_INLINE
- #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
- #endif
- #define portHAS_STACK_OVERFLOW_CHECKING 1
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
* @brief Extern declarations.
*/
- extern BaseType_t xPortIsInsideInterrupt( void );
+extern BaseType_t xPortIsInsideInterrupt( void );
- extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- #if ( configENABLE_TRUSTZONE == 1 )
- extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
- extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
- #endif /* configENABLE_TRUSTZONE */
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
- #if ( configENABLE_MPU == 1 )
- extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
- extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/**
* @brief MPU specific constants.
*/
- #if ( configENABLE_MPU == 1 )
- #define portUSING_MPU_WRAPPERS 1
- #define portPRIVILEGE_BIT ( 0x80000000UL )
- #else
- #define portPRIVILEGE_BIT ( 0x0UL )
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
/* MPU settings that can be overriden in FreeRTOSConfig.h. */
#ifndef configTOTAL_MPU_REGIONS
/* Define to 8 for backward compatibility. */
- #define configTOTAL_MPU_REGIONS ( 8UL )
+ #define configTOTAL_MPU_REGIONS ( 8UL )
#endif
/* MPU regions. */
- #define portPRIVILEGED_FLASH_REGION ( 0UL )
- #define portUNPRIVILEGED_FLASH_REGION ( 1UL )
- #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
- #define portPRIVILEGED_RAM_REGION ( 3UL )
- #define portSTACK_REGION ( 4UL )
- #define portFIRST_CONFIGURABLE_REGION ( 5UL )
- #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
- #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
- #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
- * 01 --> Device-nGnRE
- * 10 --> Device-nGRE
- * 11 --> Device-GRE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
- #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
- #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
- #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
- #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
- #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
- #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
/* Attributes used in MPU_RBAR registers. */
- #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
- #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
- #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
- #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
- #define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
- #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
- #define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
- #define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
typedef struct MPURegionSettings
{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
-/**
- * @brief MPU settings as stored in the TCB.
- */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
} xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
* @brief SVC numbers.
*/
- #define portSVC_ALLOCATE_SECURE_CONTEXT 0
- #define portSVC_FREE_SECURE_CONTEXT 1
- #define portSVC_START_SCHEDULER 2
- #define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
- #define portYIELD() vPortYield()
- #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
- #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
- #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 )
- #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/**
* @brief Critical section management.
*/
- #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
- #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
- #define portENTER_CRITICAL() vPortEnterCritical()
- #define portEXIT_CRITICAL() vPortExitCritical()
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/**
* @brief Tickless idle/low power functionality.
*/
- #ifndef portSUPPRESS_TICKS_AND_SLEEP
- extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
- #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
- #endif
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
/*-----------------------------------------------------------*/
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
- #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/*-----------------------------------------------------------*/
- #if ( configENABLE_TRUSTZONE == 1 )
+#if ( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
@@ -256,7 +378,7 @@
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
- #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
@@ -264,18 +386,18 @@
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
- #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
- #endif /* configENABLE_TRUSTZONE */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
- #if ( configENABLE_MPU == 1 )
+#if ( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
- #define portIS_PRIVILEGED() xIsPrivileged()
+ #define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
@@ -284,28 +406,44 @@
* then it raises the privilege. If this is called from any other place,
* the privilege is not raised.
*/
- #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting the bit 0 of the CONTROL
* register.
*/
- #define portRESET_PRIVILEGE() vResetPrivilege()
- #else
- #define portIS_PRIVILEGED()
- #define portRAISE_PRIVILEGE()
- #define portRESET_PRIVILEGE()
- #endif /* configENABLE_MPU */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
* @brief Barriers.
*/
- #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
/*-----------------------------------------------------------*/
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c b/Source/portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..7aa8166
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2105 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+#include "mpu_syscall_numbers.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskDelayUntil ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskAbortDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskPriorityGet ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_eTaskGetState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskGetInfo ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetIdleTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSuspend ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskResume ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ TickType_t MPU_xTaskGetTickCount( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetTickCount ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetNumberOfTasks ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTaskGetName ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ /* MPU trampoline for ulTaskGetRunTimeCounter: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_ulTaskGetRunTimeCounterImpl; set -> SVC
+  * SYSTEM_CALL_ulTaskGetRunTimeCounter. r0 is preserved across the test. */
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ /* MPU trampoline for ulTaskGetRunTimePercent: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_ulTaskGetRunTimePercentImpl; set -> SVC
+  * SYSTEM_CALL_ulTaskGetRunTimePercent. r0 is preserved across the test. */
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ /* MPU trampoline for ulTaskGetIdleRunTimePercent: CONTROL bit 0 (nPRIV)
+  * clear -> direct branch to MPU_ulTaskGetIdleRunTimePercentImpl; set ->
+  * SVC SYSTEM_CALL_ulTaskGetIdleRunTimePercent. r0 preserved across the test. */
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ /* MPU trampoline for ulTaskGetIdleRunTimeCounter: CONTROL bit 0 (nPRIV)
+  * clear -> direct branch to MPU_ulTaskGetIdleRunTimeCounterImpl; set ->
+  * SVC SYSTEM_CALL_ulTaskGetIdleRunTimeCounter. r0 preserved across the test. */
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ /* MPU trampoline for vTaskSetApplicationTaskTag: CONTROL bit 0 (nPRIV)
+  * clear -> direct branch to MPU_vTaskSetApplicationTaskTagImpl; set ->
+  * SVC SYSTEM_CALL_vTaskSetApplicationTaskTag. r0 preserved across the test. */
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ /* MPU trampoline for xTaskGetApplicationTaskTag: CONTROL bit 0 (nPRIV)
+  * clear -> direct branch to MPU_xTaskGetApplicationTaskTagImpl; set ->
+  * SVC SYSTEM_CALL_xTaskGetApplicationTaskTag. r0 preserved across the test. */
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ /* MPU trampoline for vTaskSetThreadLocalStoragePointer: CONTROL bit 0
+  * (nPRIV) clear -> direct branch to MPU_vTaskSetThreadLocalStoragePointerImpl;
+  * set -> SVC SYSTEM_CALL_vTaskSetThreadLocalStoragePointer. r0 preserved. */
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ /* MPU trampoline for pvTaskGetThreadLocalStoragePointer: CONTROL bit 0
+  * (nPRIV) clear -> direct branch to MPU_pvTaskGetThreadLocalStoragePointerImpl;
+  * set -> SVC SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer. r0 preserved. */
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ /* MPU trampoline for uxTaskGetSystemState: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_uxTaskGetSystemStateImpl; set -> SVC
+  * SYSTEM_CALL_uxTaskGetSystemState. r0 preserved across the test. */
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetSystemState ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+ /* MPU trampoline for uxTaskGetStackHighWaterMark: CONTROL bit 0 (nPRIV)
+  * clear -> direct branch to MPU_uxTaskGetStackHighWaterMarkImpl; set ->
+  * SVC SYSTEM_CALL_uxTaskGetStackHighWaterMark. r0 preserved across the test. */
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+ /* MPU trampoline for uxTaskGetStackHighWaterMark2: CONTROL bit 0 (nPRIV)
+  * clear -> direct branch to MPU_uxTaskGetStackHighWaterMark2Impl; set ->
+  * SVC SYSTEM_CALL_uxTaskGetStackHighWaterMark2. r0 preserved across the test. */
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark2 ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+ /* MPU trampoline for xTaskGetCurrentTaskHandle: CONTROL bit 0 (nPRIV)
+  * clear -> direct branch to MPU_xTaskGetCurrentTaskHandleImpl; set ->
+  * SVC SYSTEM_CALL_xTaskGetCurrentTaskHandle. r0 preserved across the test. */
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetCurrentTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+ /* MPU trampoline for xTaskGetSchedulerState: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_xTaskGetSchedulerStateImpl; set -> SVC
+  * SYSTEM_CALL_xTaskGetSchedulerState. r0 preserved across the test. */
+ BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetSchedulerState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ /* MPU trampoline for vTaskSetTimeOutState: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_vTaskSetTimeOutStateImpl; set -> SVC
+  * SYSTEM_CALL_vTaskSetTimeOutState. r0 preserved across the test. */
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetTimeOutState ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* MPU trampoline for xTaskCheckForTimeOut: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_xTaskCheckForTimeOutImpl; set -> SVC
+  * SYSTEM_CALL_xTaskCheckForTimeOut. r0 preserved across the test. */
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskCheckForTimeOut ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ /* MPU trampoline for xTaskGenericNotify. Note the "Entry" suffix applies to
+  * this wrapper only: the labels, Impl symbol and SVC number all use the bare
+  * xTaskGenericNotify name. CONTROL bit 0 (nPRIV) clear -> direct branch to
+  * MPU_xTaskGenericNotifyImpl; set -> SVC SYSTEM_CALL_xTaskGenericNotify. */
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotify ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ /* MPU trampoline for xTaskGenericNotifyWait. As with the Notify entry, the
+  * labels, Impl symbol and SVC number drop the "Entry" suffix. CONTROL bit 0
+  * (nPRIV) clear -> direct branch to MPU_xTaskGenericNotifyWaitImpl; set ->
+  * SVC SYSTEM_CALL_xTaskGenericNotifyWait. */
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyWait ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ /* MPU trampoline for ulTaskGenericNotifyTake: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_ulTaskGenericNotifyTakeImpl; set -> SVC
+  * SYSTEM_CALL_ulTaskGenericNotifyTake. r0 preserved across the test. */
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyTake ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ /* MPU trampoline for xTaskGenericNotifyStateClear: CONTROL bit 0 (nPRIV)
+  * clear -> direct branch to MPU_xTaskGenericNotifyStateClearImpl; set ->
+  * SVC SYSTEM_CALL_xTaskGenericNotifyStateClear. r0 preserved across the test. */
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyStateClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ /* MPU trampoline for ulTaskGenericNotifyValueClear: CONTROL bit 0 (nPRIV)
+  * clear -> direct branch to MPU_ulTaskGenericNotifyValueClearImpl; set ->
+  * SVC SYSTEM_CALL_ulTaskGenericNotifyValueClear. r0 preserved across the test. */
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyValueClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ /* MPU trampoline for xQueueGenericSend: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_xQueueGenericSendImpl; set -> SVC
+  * SYSTEM_CALL_xQueueGenericSend. r0 preserved across the test. */
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGenericSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* MPU trampoline for uxQueueMessagesWaiting: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_uxQueueMessagesWaitingImpl; set -> SVC
+  * SYSTEM_CALL_uxQueueMessagesWaiting. r0 preserved across the test. */
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueMessagesWaiting ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* MPU trampoline for uxQueueSpacesAvailable: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_uxQueueSpacesAvailableImpl; set -> SVC
+  * SYSTEM_CALL_uxQueueSpacesAvailable. r0 preserved across the test. */
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* MPU trampoline for xQueueReceive: CONTROL bit 0 (nPRIV) clear -> direct
+  * branch to MPU_xQueueReceiveImpl; set -> SVC SYSTEM_CALL_xQueueReceive.
+  * r0 preserved across the test. */
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* MPU trampoline for xQueuePeek: CONTROL bit 0 (nPRIV) clear -> direct
+  * branch to MPU_xQueuePeekImpl; set -> SVC SYSTEM_CALL_xQueuePeek.
+  * r0 preserved across the test. */
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueuePeek ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* MPU trampoline for xQueueSemaphoreTake: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_xQueueSemaphoreTakeImpl; set -> SVC
+  * SYSTEM_CALL_xQueueSemaphoreTake. r0 preserved across the test. */
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSemaphoreTake ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+ /* MPU trampoline for xQueueGetMutexHolder: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_xQueueGetMutexHolderImpl; set -> SVC
+  * SYSTEM_CALL_xQueueGetMutexHolder. r0 preserved across the test. */
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGetMutexHolder ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ /* MPU trampoline for xQueueTakeMutexRecursive: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_xQueueTakeMutexRecursiveImpl; set -> SVC
+  * SYSTEM_CALL_xQueueTakeMutexRecursive. r0 preserved across the test. */
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueTakeMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ /* MPU trampoline for xQueueGiveMutexRecursive: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_xQueueGiveMutexRecursiveImpl; set -> SVC
+  * SYSTEM_CALL_xQueueGiveMutexRecursive. r0 preserved across the test. */
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGiveMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ /* MPU trampoline for xQueueSelectFromSet: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_xQueueSelectFromSetImpl; set -> SVC
+  * SYSTEM_CALL_xQueueSelectFromSet. r0 preserved across the test. */
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSelectFromSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ /* MPU trampoline for xQueueAddToSet: CONTROL bit 0 (nPRIV) clear -> direct
+  * branch to MPU_xQueueAddToSetImpl; set -> SVC SYSTEM_CALL_xQueueAddToSet.
+  * r0 preserved across the test. */
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueAddToSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ /* MPU trampoline for vQueueAddToRegistry: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_vQueueAddToRegistryImpl; set -> SVC
+  * SYSTEM_CALL_vQueueAddToRegistry. r0 preserved across the test. */
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueAddToRegistry ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ /* MPU trampoline for vQueueUnregisterQueue: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_vQueueUnregisterQueueImpl; set -> SVC
+  * SYSTEM_CALL_vQueueUnregisterQueue. r0 preserved across the test. */
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueUnregisterQueue ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ /* MPU trampoline for pcQueueGetName: CONTROL bit 0 (nPRIV) clear -> direct
+  * branch to MPU_pcQueueGetNameImpl; set -> SVC SYSTEM_CALL_pcQueueGetName.
+  * r0 preserved across the test. */
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcQueueGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* MPU trampoline for pvTimerGetTimerID: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_pvTimerGetTimerIDImpl; set -> SVC
+  * SYSTEM_CALL_pvTimerGetTimerID. r0 preserved across the test. */
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTimerGetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* MPU trampoline for vTimerSetTimerID: CONTROL bit 0 (nPRIV) clear ->
+  * direct branch to MPU_vTimerSetTimerIDImpl; set -> SVC
+  * SYSTEM_CALL_vTimerSetTimerID. r0 preserved across the test. */
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerIsTimerActive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGenericCommand ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTimerGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetPeriod ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetExpiryTime ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupWaitBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupClearBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSetBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSync ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxEventGroupGetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vEventGroupSetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsFull ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsEmpty ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferBytesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSetTriggerLevel ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferNextMessageLengthBytes ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
diff --git a/Source/portable/GCC/ARM_CM3_MPU/port.c b/Source/portable/GCC/ARM_CM3_MPU/port.c
index 40b38b2..f99acf5 100644
--- a/Source/portable/GCC/ARM_CM3_MPU/port.c
+++ b/Source/portable/GCC/ARM_CM3_MPU/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -38,6 +38,7 @@
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "task.h"
+#include "mpu_syscall_numbers.h"
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
@@ -83,12 +84,14 @@
/* Constants required to access and manipulate the SysTick. */
#define portNVIC_SYSTICK_INT ( 0x00000002UL )
#define portNVIC_SYSTICK_ENABLE ( 0x00000001UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
#define portNVIC_SVC_PRI ( ( ( uint32_t ) configMAX_SYSCALL_INTERRUPT_PRIORITY - 1UL ) << 24UL )
/* Constants required to set up the initial stack. */
#define portINITIAL_XPSR ( 0x01000000 )
+#define portINITIAL_EXC_RETURN ( 0xfffffffdUL )
#define portINITIAL_CONTROL_IF_UNPRIVILEGED ( 0x03 )
#define portINITIAL_CONTROL_IF_PRIVILEGED ( 0x02 )
@@ -102,12 +105,31 @@
#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
#define portPRIGROUP_SHIFT ( 8UL )
+/* Constants used during system call enter and exit. */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+
/* Offsets in the stack to the parameters when inside the SVC handler. */
+#define portOFFSET_TO_LR ( 5 )
#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
/* For strict compliance with the Cortex-M spec the task start address should
* have bit-0 clear, as it is loaded into the PC on exit from an ISR. */
#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL )
+
+/* Does addr lie within [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/*
@@ -145,7 +167,7 @@
* C portion of the SVC handler. The SVC handler is split between an asm entry
* and a C wrapper for simplicity of coding and maintenance.
*/
-static void prvSVCHandler( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION;
+void vSVCHandler_C( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION;
/**
* @brief Checks whether or not the processor is privileged.
@@ -167,7 +189,7 @@
/**
* @brief Enter critical section.
*/
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
void vPortEnterCritical( void ) FREERTOS_SYSTEM_CALL;
#else
void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
@@ -176,11 +198,53 @@
/**
* @brief Exit from critical section.
*/
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
void vPortExitCritical( void ) FREERTOS_SYSTEM_CALL;
#else
void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
#endif
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/
/* Each task maintains its own interrupt status in the critical nesting
@@ -188,6 +252,15 @@
* switches can only occur when uxCriticalNesting is zero. */
static UBaseType_t uxCriticalNesting = 0xaaaaaaaa;
+#if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/*
+ * This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
/*
* Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
* FreeRTOS API functions are not called from interrupts that have been assigned
@@ -206,60 +279,115 @@
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged )
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings )
{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = 0; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */
-
if( xRunPrivileged == pdTRUE )
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED;
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED;
}
else
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED;
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED;
+ }
+ xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */
+ xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */
+ xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */
+ xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */
+ xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */
+ xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */
+ xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */
+ xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */
+ xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+
+ xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */
+ xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */
+ xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */
+ xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */
+ xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */
+ xMPUSettings->ulContext[ 16 ] = 0; /* LR. */
+ xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */
+ xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ return &( xMPUSettings->ulContext[ 19 ] );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r2, [r0, #24] \n"
+ "ldrb r1, [r2, #-2] \n"
+ "cmp r1, %0 \n"
+ "blt vSystemCallEnter \n"
+ "cmp r1, %1 \n"
+ "beq vSystemCallExit \n"
+ "b vSVCHandler_C \n"
+ " \n"
+ : /* No outputs. */
+ : "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
}
- return pxTopOfStack;
-}
+#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ /* Assumes psp was in use. */
+ __asm volatile
+ (
+ #ifndef USE_PROCESS_STACK /* Code should not be required if a main() is using the process stack. */
+ " tst lr, #4 \n"
+ " ite eq \n"
+ " mrseq r0, msp \n"
+ " mrsne r0, psp \n"
+ #else
+ " mrs r0, psp \n"
+ #endif
+ " b %0 \n"
+ ::"i" ( vSVCHandler_C ) : "r0", "memory"
+ );
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
-void vPortSVCHandler( void )
-{
- /* Assumes psp was in use. */
- __asm volatile
- (
- #ifndef USE_PROCESS_STACK /* Code should not be required if a main() is using the process stack. */
- " tst lr, #4 \n"
- " ite eq \n"
- " mrseq r0, msp \n"
- " mrsne r0, psp \n"
- #else
- " mrs r0, psp \n"
- #endif
- " b %0 \n"
- ::"i" ( prvSVCHandler ) : "r0", "memory"
- );
-}
-/*-----------------------------------------------------------*/
-
-static void prvSVCHandler( uint32_t * pulParam )
+void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
{
uint8_t ucSVCNumber;
uint32_t ulPC;
- #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
+ #if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) )
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@@ -269,7 +397,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
- #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ #endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) ) */
/* The stack contains: r0, r1, r2, r3, r12, LR, PC and xPSR. The first
* argument (r0) is pulParam[ 0 ]. */
@@ -295,83 +423,307 @@
break;
+ #if ( configUSE_MPU_WRAPPERS_V1 == 1 )
+ #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
+ case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the
+ * svc was raised from any of the
+ * system calls. */
- #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
- case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the
- * svc was raised from any of the
- * system calls. */
-
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- __asm volatile
- (
- " mrs r1, control \n"/* Obtain current control value. */
- " bic r1, #1 \n"/* Set privilege bit. */
- " msr control, r1 \n"/* Write back new control value. */
- ::: "r1", "memory"
- );
- }
-
- break;
- #else /* if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
- case portSVC_RAISE_PRIVILEGE:
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
__asm volatile
(
- " mrs r1, control \n"/* Obtain current control value. */
- " bic r1, #1 \n"/* Set privilege bit. */
- " msr control, r1 \n"/* Write back new control value. */
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Set privilege bit. */
+ " msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
- break;
- #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ }
- default: /* Unknown SVC call. */
- break;
+ break;
+ #else /* if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ case portSVC_RAISE_PRIVILEGE:
+ __asm volatile
+ (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Set privilege bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+ break;
+ #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+ default: /* Unknown SVC call. */
+ break;
}
}
/*-----------------------------------------------------------*/
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulSystemCallLocation, i;
+ const uint32_t ulStackFrameSize = 8;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile
+ (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Clear nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vSystemCallExit( uint32_t * pulSystemCallStack ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulSystemCallLocation, i;
+ const uint32_t ulStackFrameSize = 8;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the privileged code (i.e. application is not
+ * raising SVC directly). This SVC is only raised from
+ * vRequestSystemCallExit which is in the privileged code section.
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+ * this means that we previously entered a system call and the
+ * application is not attempting to exit without entering a system
+ * call.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+ {
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile
+ (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " orr r1, #1 \n" /* Set nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Return to the caller of the System Call entry point (i.e. the
+ * caller of the MPU_<API>). */
+ pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ /* Ensure that LR has a valid value.*/
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+/*-----------------------------------------------------------*/
+
static void prvRestoreContextOfFirstTask( void )
{
__asm volatile
(
- " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"
- " ldr r0, [r0] \n"
- " msr msp, r0 \n"/* Set the msp back to the start of the stack. */
- " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */
- " ldr r1, [r3] \n"
- " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */
- " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */
- " \n"
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
- " \n"
- " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers. */
- " stmia r2!, {r4-r11} \n"/* Write 4 sets of MPU registers. */
- " \n"
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- " \n"
- " ldmia r0!, {r3, r4-r11} \n"/* Pop the registers that are not automatically saved on exception entry. */
- " msr control, r3 \n"
- " msr psp, r0 \n"/* Restore the task stack pointer. */
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " ldr r14, =0xfffffffd \n"/* Load exec return code. */
- " bx r14 \n"
- " \n"
- " .ltorg \n"/* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */
- " .align 4 \n"
- "pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n"
+ " ldr r0, [r0] \n"
+ " msr msp, r0 \n" /* Set the msp back to the start of the stack. */
+ " \n"
+ /*------------ Program MPU. ------------ */
+ " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ " str r3, [r0] \n" /* Disable MPU. */
+ " \n"
+ " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " \n"
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ " str r3, [r0] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ /*---------- Restore Context. ---------- */
+ " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ " msr psp, r0 \n"
+ " stmia r0, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */
+ " msr control, r3 \n"
+ " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
+ " bx lr \n"
+ " \n"
+ " .ltorg \n" /* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */
+ " .align 4 \n"
+ "pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
/*-----------------------------------------------------------*/
@@ -381,71 +733,88 @@
*/
BaseType_t xPortStartScheduler( void )
{
- /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See
- * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
- configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) );
-
#if ( configASSERT_DEFINED == 1 )
+ {
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
+ volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine the maximum priority from which ISR safe FreeRTOS API
+ * functions can be called. ISR safe functions are those that end in
+ * "FromISR". FreeRTOS maintains separate thread and ISR API functions
+ * to ensure interrupt entry is as fast and simple as possible.
+ *
+ * Save the interrupt priority value that is about to be clobbered. */
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
+
+ /* Determine the number of priority bits available. First write to all
+ * possible bits. */
+ *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = *pucFirstUserPriorityRegister;
+
+ /* Use the same mask on the maximum system call priority. */
+ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+ /* Calculate the maximum acceptable priority group value for the number
+ * of bits read back. */
+
+ while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- volatile uint32_t ulOriginalPriority;
- volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
- volatile uint8_t ucMaxPriorityValue;
-
- /* Determine the maximum priority from which ISR safe FreeRTOS API
- * functions can be called. ISR safe functions are those that end in
- * "FromISR". FreeRTOS maintains separate thread and ISR API functions
- * to ensure interrupt entry is as fast and simple as possible.
- *
- * Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
-
- /* Determine the number of priority bits available. First write to all
- * possible bits. */
- *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
-
- /* Read the value back to see how many bits stuck. */
- ucMaxPriorityValue = *pucFirstUserPriorityRegister;
-
- /* Use the same mask on the maximum system call priority. */
- ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
-
- /* Calculate the maximum acceptable priority group value for the number
- * of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
-
- while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
- {
- ulMaxPRIGROUPValue--;
- ucMaxPriorityValue <<= ( uint8_t ) 0x01;
- }
-
- #ifdef __NVIC_PRIO_BITS
- {
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
- }
- #endif
-
- #ifdef configPRIO_BITS
- {
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
- }
- #endif
-
- /* Shift the priority group value back to its position within the AIRCR
- * register. */
- ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
- ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
-
- /* Restore the clobbered interrupt priority register to its original
- * value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ ulImplementedPrioBits++;
+ ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
+
+ if( ulImplementedPrioBits == 8 )
+ {
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
+ }
+ else
+ {
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+ }
+
+ /* Shift the priority group value back to its position within the AIRCR
+ * register. */
+ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+ ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+ /* Restore the clobbered interrupt priority register to its original
+ * value. */
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
+ }
#endif /* configASSERT_DEFINED */
/* Make PendSV and SysTick the same priority as the kernel, and the SVC
@@ -464,20 +833,28 @@
/* Initialise the critical nesting count ready for the first task. */
uxCriticalNesting = 0;
+ #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+ {
+ xSchedulerRunning = pdTRUE;
+ }
+ #endif
+
/* Start the first task. */
- __asm volatile (
- " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"
- " ldr r0, [r0] \n"
- " msr msp, r0 \n"/* Set the msp back to the start of the stack. */
- " cpsie i \n"/* Globally enable interrupts. */
- " cpsie f \n"
- " dsb \n"
- " isb \n"
- " svc %0 \n"/* System call to start first task. */
- " nop \n"
- " .ltorg \n"
- ::"i" ( portSVC_START_SCHEDULER ) : "memory" );
+ __asm volatile
+ (
+ " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n"
+ " ldr r0, [r0] \n"
+ " msr msp, r0 \n" /* Set the msp back to the start of the stack. */
+ " cpsie i \n" /* Globally enable interrupts. */
+ " cpsie f \n"
+ " dsb \n"
+ " isb \n"
+ " svc %0 \n" /* System call to start first task. */
+ " nop \n"
+ " .ltorg \n"
+ ::"i" ( portSVC_START_SCHEDULER ) : "memory"
+ );
/* Should not get here! */
return 0;
@@ -494,39 +871,63 @@
void vPortEnterCritical( void )
{
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
- if( portIS_PRIVILEGED() == pdFALSE )
- {
- portRAISE_PRIVILEGE();
- portMEMORY_BARRIER();
+ #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+ if( portIS_PRIVILEGED() == pdFALSE )
+ {
+ portRAISE_PRIVILEGE();
+ portMEMORY_BARRIER();
+ portDISABLE_INTERRUPTS();
+ uxCriticalNesting++;
+ portMEMORY_BARRIER();
+
+ portRESET_PRIVILEGE();
+ portMEMORY_BARRIER();
+ }
+ else
+ {
+ portDISABLE_INTERRUPTS();
+ uxCriticalNesting++;
+ }
+ #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
portDISABLE_INTERRUPTS();
uxCriticalNesting++;
- portMEMORY_BARRIER();
-
- portRESET_PRIVILEGE();
- portMEMORY_BARRIER();
- }
- else
- {
- portDISABLE_INTERRUPTS();
- uxCriticalNesting++;
- }
-#else
- portDISABLE_INTERRUPTS();
- uxCriticalNesting++;
-#endif
+ #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
}
/*-----------------------------------------------------------*/
void vPortExitCritical( void )
{
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
- if( portIS_PRIVILEGED() == pdFALSE )
- {
- portRAISE_PRIVILEGE();
- portMEMORY_BARRIER();
+ #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+ if( portIS_PRIVILEGED() == pdFALSE )
+ {
+ portRAISE_PRIVILEGE();
+ portMEMORY_BARRIER();
+ configASSERT( uxCriticalNesting );
+ uxCriticalNesting--;
+
+ if( uxCriticalNesting == 0 )
+ {
+ portENABLE_INTERRUPTS();
+ }
+
+ portMEMORY_BARRIER();
+
+ portRESET_PRIVILEGE();
+ portMEMORY_BARRIER();
+ }
+ else
+ {
+ configASSERT( uxCriticalNesting );
+ uxCriticalNesting--;
+
+ if( uxCriticalNesting == 0 )
+ {
+ portENABLE_INTERRUPTS();
+ }
+ }
+ #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
configASSERT( uxCriticalNesting );
uxCriticalNesting--;
@@ -534,30 +935,7 @@
{
portENABLE_INTERRUPTS();
}
- portMEMORY_BARRIER();
-
- portRESET_PRIVILEGE();
- portMEMORY_BARRIER();
- }
- else
- {
- configASSERT( uxCriticalNesting );
- uxCriticalNesting--;
-
- if( uxCriticalNesting == 0 )
- {
- portENABLE_INTERRUPTS();
- }
- }
-#else
- configASSERT( uxCriticalNesting );
- uxCriticalNesting--;
-
- if( uxCriticalNesting == 0 )
- {
- portENABLE_INTERRUPTS();
- }
-#endif
+ #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
}
/*-----------------------------------------------------------*/
@@ -567,54 +945,67 @@
__asm volatile
(
- " mrs r0, psp \n"
- " \n"
- " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */
- " ldr r2, [r3] \n"
- " \n"
- " mrs r1, control \n"
- " stmdb r0!, {r1, r4-r11} \n"/* Save the remaining registers. */
- " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */
- " \n"
- " stmdb sp!, {r3, r14} \n"
- " mov r0, %0 \n"
- " msr basepri, r0 \n"
- " dsb \n"
- " isb \n"
- " bl vTaskSwitchContext \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " ldmia sp!, {r3, r14} \n"
- " \n"/* Restore the context. */
- " ldr r1, [r3] \n"
- " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */
- " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */
- " \n"
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
- " \n"
- " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers. */
- " stmia r2!, {r4-r11} \n"/* Write 4 sets of MPU registers. */
- " \n"
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- " \n"
- " ldmia r0!, {r3, r4-r11} \n"/* Pop the registers that are not automatically saved on exception entry. */
- " msr control, r3 \n"
- " \n"
- " msr psp, r0 \n"
- " bx r14 \n"
- " \n"
- " .ltorg \n"/* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */
- " .align 4 \n"
- "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " ldr r1, [r2] \n" /* r1 = Location where the context should be saved. */
+ " \n"
+ /*------------ Save Context. ----------- */
+ " mrs r3, control \n"
+ " mrs r0, psp \n"
+ " isb \n"
+ " \n"
+ " stmia r1!, {r3-r11, lr} \n" /* Store CONTROL register, r4-r11 and LR. */
+ " ldmia r0, {r4-r11} \n" /* Copy hardware saved context into r4-r11. */
+ " stmia r1!, {r0, r4-r11} \n" /* Store original PSP (after hardware has saved context) and the hardware saved context. */
+ " str r1, [r2] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ /*---------- Select next task. --------- */
+ " mov r0, %0 \n"
+ " msr basepri, r0 \n"
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
+ " \n"
+ /*------------ Program MPU. ------------ */
+ " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ " str r3, [r0] \n" /* Disable MPU. */
+ " \n"
+ " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " \n"
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ " str r3, [r0] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ /*---------- Restore Context. ---------- */
+ " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ " msr psp, r0 \n"
+ " stmia r0, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */
+ " msr control, r3 \n"
+ " \n"
+ " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .ltorg \n" /* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
@@ -745,14 +1136,14 @@
{
__asm volatile
(
- " mrs r0, control \n"/* r0 = CONTROL. */
- " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
- " ite ne \n"
- " movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
- " moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
- " bx lr \n"/* Return. */
- " \n"
- " .align 4 \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+ " ite ne \n"
+ " movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ " moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ " bx lr \n" /* Return. */
+ " \n"
+ " .align 4 \n"
::: "r0", "memory"
);
}
@@ -762,10 +1153,10 @@
{
__asm volatile
(
- " mrs r0, control \n"/* r0 = CONTROL. */
- " orr r0, #1 \n"/* r0 = r0 | 1. */
- " msr control, r0 \n"/* CONTROL = r0. */
- " bx lr \n"/* Return to the caller. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " orr r0, #1 \n" /* r0 = r0 | 1. */
+ " msr control, r0 \n" /* CONTROL = r0. */
+ " bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@@ -798,11 +1189,19 @@
( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) |
( portMPU_REGION_ENABLE );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
+
/* Invalidate user configurable regions. */
for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ )
{
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
}
else
@@ -825,6 +1224,11 @@
( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) |
( portMPU_REGION_CACHEABLE_BUFFERABLE ) |
( portMPU_REGION_ENABLE );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) +
+ ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
}
lIndex = 0;
@@ -845,12 +1249,30 @@
( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) |
( xRegions[ lIndex ].ulParameters ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL );
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
+
+ if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) ||
+ ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
}
else
{
/* Invalidate the region. */
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
lIndex++;
@@ -859,6 +1281,47 @@
}
/*-----------------------------------------------------------*/
+BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+{
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+}
+/*-----------------------------------------------------------*/
+
#if ( configASSERT_DEFINED == 1 )
void vPortValidateInterruptPriority( void )
@@ -887,10 +1350,10 @@
* be set to a value equal to or numerically *higher* than
* configMAX_SYSCALL_INTERRUPT_PRIORITY.
*
- * Interrupts that use the FreeRTOS API must not be left at their
- * default priority of zero as that is the highest possible priority,
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
* which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
- * and therefore also guaranteed to be invalid.
+ * and therefore also guaranteed to be invalid.
*
* FreeRTOS maintains separate thread and ISR API functions to ensure
* interrupt entry is as fast and simple as possible.
@@ -919,3 +1382,98 @@
#endif /* configASSERT_DEFINED */
/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings;
+
+ if( xSchedulerRunning == pdFALSE )
+ {
+ /* Grant access to all the kernel objects before the scheduler
+ * is started. It is necessary because there is no task running
+ * yet and therefore, we cannot use the permissions of any
+ * task. */
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+
+ #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ ( void ) lInternalIndexOfKernelObject;
+
+ /* If Access Control List feature is not used, all the tasks have
+ * access to all the kernel objects. */
+ return pdTRUE;
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM3_MPU/portmacro.h b/Source/portable/GCC/ARM_CM3_MPU/portmacro.h
index 23499eb..170a58c 100644
--- a/Source/portable/GCC/ARM_CM3_MPU/portmacro.h
+++ b/Source/portable/GCC/ARM_CM3_MPU/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -28,11 +28,13 @@
#ifndef PORTMACRO_H
- #define PORTMACRO_H
+#define PORTMACRO_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*-----------------------------------------------------------
* Port specific definitions.
@@ -57,16 +59,18 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
+ #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
- #else
+ #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+ #else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
@@ -100,11 +104,52 @@
uint32_t ulRegionAttribute;
} xMPU_REGION_REGISTERS;
-/* Plus 1 to create space for the stack region. */
+ typedef struct MPU_REGION_SETTINGS
+ {
+ uint32_t ulRegionStartAddress;
+ uint32_t ulRegionEndAddress;
+ uint32_t ulRegionPermissions;
+ } xMPU_REGION_SETTINGS;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#define MAX_CONTEXT_SIZE ( 20 )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+#define portACL_ENTRY_SIZE_BITS ( 32U )
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
typedef struct MPU_SETTINGS
{
xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ];
- } xMPU_SETTINGS;
+ xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
+} xMPU_SETTINGS;
/* Architecture specifics. */
#define portSTACK_GROWTH ( -1 )
@@ -114,13 +159,14 @@
/*-----------------------------------------------------------*/
/* SVC numbers for various services. */
- #define portSVC_START_SCHEDULER 0
- #define portSVC_YIELD 1
- #define portSVC_RAISE_PRIVILEGE 2
+#define portSVC_START_SCHEDULER 100
+#define portSVC_YIELD 101
+#define portSVC_RAISE_PRIVILEGE 102
+#define portSVC_SYSTEM_CALL_EXIT 103
/* Scheduler utilities. */
- #define portYIELD() __asm volatile ( " SVC %0 \n"::"i" ( portSVC_YIELD ) : "memory" )
+ #define portYIELD() __asm volatile ( " SVC %0 \n"::"i" ( portSVC_YIELD ) : "memory" )
#define portYIELD_WITHIN_API() \
{ \
/* Set a PendSV to request a context switch. */ \
@@ -228,6 +274,16 @@
#define portRESET_PRIVILEGE() vResetPrivilege()
/*-----------------------------------------------------------*/
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+/*-----------------------------------------------------------*/
+
portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
@@ -256,10 +312,10 @@
__asm volatile
(
- " mov %0, %1 \n"\
- " msr basepri, %0 \n"\
- " isb \n"\
- " dsb \n"\
+ " mov %0, %1 \n"\
+ " msr basepri, %0 \n"\
+ " isb \n"\
+ " dsb \n"\
: "=r" ( ulNewBASEPRI ) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@@ -272,11 +328,11 @@
__asm volatile
(
- " mrs %0, basepri \n"\
- " mov %1, %2 \n"\
- " msr basepri, %1 \n"\
- " isb \n"\
- " dsb \n"\
+ " mrs %0, basepri \n"\
+ " mov %1, %2 \n"\
+ " msr basepri, %1 \n"\
+ " isb \n"\
+ " dsb \n"\
: "=r" ( ulOriginalBASEPRI ), "=r" ( ulNewBASEPRI ) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
@@ -290,7 +346,7 @@
{
__asm volatile
(
- " msr basepri, %0 "::"r" ( ulNewMaskValue ) : "memory"
+ " msr basepri, %0 "::"r" ( ulNewMaskValue ) : "memory"
);
}
/*-----------------------------------------------------------*/
@@ -298,12 +354,15 @@
#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
#ifndef configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY
- #warning "configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY is not defined. We recommend defining it to 1 in FreeRTOSConfig.h for better security. https://www.FreeRTOS.org/FreeRTOS-V10.3.x.html"
+ #warning "configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY is not defined. We recommend defining it to 1 in FreeRTOSConfig.h for better security. *www.FreeRTOS.org/FreeRTOS-V10.3.x.html"
#define configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY 0
#endif
/*-----------------------------------------------------------*/
- #ifdef __cplusplus
- }
- #endif
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM4F/port.c b/Source/portable/GCC/ARM_CM4F/port.c
index 4c9925c..73430b5 100644
--- a/Source/portable/GCC/ARM_CM4F/port.c
+++ b/Source/portable/GCC/ARM_CM4F/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -58,8 +58,9 @@
#define portCORTEX_M7_r0p1_ID ( 0x410FC271UL )
#define portCORTEX_M7_r0p0_ID ( 0x410FC270UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
/* Constants required to check the validity of an interrupt priority. */
#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
@@ -250,18 +251,18 @@
void vPortSVCHandler( void )
{
__asm volatile (
- " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */
- " ldr r1, [r3] \n"/* Use pxCurrentTCBConst to get the pxCurrentTCB address. */
- " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */
- " ldmia r0!, {r4-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */
- " msr psp, r0 \n"/* Restore the task stack pointer. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " bx r14 \n"
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Restore the context. */
+ " ldr r1, [r3] \n" /* Use pxCurrentTCBConst to get the pxCurrentTCB address. */
+ " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */
+ " ldmia r0!, {r4-r11, r14} \n" /* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */
+ " msr psp, r0 \n" /* Restore the task stack pointer. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
+ " bx r14 \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
/*-----------------------------------------------------------*/
@@ -273,19 +274,19 @@
* would otherwise result in the unnecessary leaving of space in the SVC stack
* for lazy saving of FPU registers. */
__asm volatile (
- " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"
- " ldr r0, [r0] \n"
- " msr msp, r0 \n"/* Set the msp back to the start of the stack. */
- " mov r0, #0 \n"/* Clear the bit that indicates the FPU is in use, see comment above. */
- " msr control, r0 \n"
- " cpsie i \n"/* Globally enable interrupts. */
- " cpsie f \n"
- " dsb \n"
- " isb \n"
- " svc 0 \n"/* System call to start first task. */
- " nop \n"
- " .ltorg \n"
+ " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n"
+ " ldr r0, [r0] \n"
+ " msr msp, r0 \n" /* Set the msp back to the start of the stack. */
+ " mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */
+ " msr control, r0 \n"
+ " cpsie i \n" /* Globally enable interrupts. */
+ " cpsie f \n"
+ " dsb \n"
+ " isb \n"
+ " svc 0 \n" /* System call to start first task. */
+ " nop \n"
+ " .ltorg \n"
);
}
/*-----------------------------------------------------------*/
@@ -295,10 +296,6 @@
*/
BaseType_t xPortStartScheduler( void )
{
- /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0.
- * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
- configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY );
-
/* This port can be used on all revisions of the Cortex-M7 core other than
* the r0p1 parts. r0p1 parts should use the port from the
* /source/portable/GCC/ARM_CM7/r0p1 directory. */
@@ -307,7 +304,8 @@
#if ( configASSERT_DEFINED == 1 )
{
- volatile uint32_t ulOriginalPriority;
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
volatile uint8_t ucMaxPriorityValue;
@@ -317,7 +315,7 @@
* ensure interrupt entry is as fast and simple as possible.
*
* Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
/* Determine the number of priority bits available. First write to all
* possible bits. */
@@ -329,33 +327,53 @@
/* Use the same mask on the maximum system call priority. */
ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
/* Calculate the maximum acceptable priority group value for the number
* of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- ulMaxPRIGROUPValue--;
+ ulImplementedPrioBits++;
ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
- #ifdef __NVIC_PRIO_BITS
+ if( ulImplementedPrioBits == 8 )
{
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
}
- #endif
-
- #ifdef configPRIO_BITS
+ else
{
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
}
- #endif
/* Shift the priority group value back to its position within the AIRCR
* register. */
@@ -364,7 +382,7 @@
/* Restore the clobbered interrupt priority register to its original
* value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
}
#endif /* configASSERT_DEFINED */
@@ -445,52 +463,52 @@
__asm volatile
(
- " mrs r0, psp \n"
- " isb \n"
- " \n"
- " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */
- " ldr r2, [r3] \n"
- " \n"
- " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, push high vfp registers. */
- " it eq \n"
- " vstmdbeq r0!, {s16-s31} \n"
- " \n"
- " stmdb r0!, {r4-r11, r14} \n"/* Save the core registers. */
- " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */
- " \n"
- " stmdb sp!, {r0, r3} \n"
- " mov r0, %0 \n"
- " msr basepri, r0 \n"
- " dsb \n"
- " isb \n"
- " bl vTaskSwitchContext \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " ldmia sp!, {r0, r3} \n"
- " \n"
- " ldr r1, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */
- " ldr r0, [r1] \n"
- " \n"
- " ldmia r0!, {r4-r11, r14} \n"/* Pop the core registers. */
- " \n"
- " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, pop the high vfp registers too. */
- " it eq \n"
- " vldmiaeq r0!, {s16-s31} \n"
- " \n"
- " msr psp, r0 \n"
- " isb \n"
- " \n"
+ " mrs r0, psp \n"
+ " isb \n"
+ " \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Get the location of the current TCB. */
+ " ldr r2, [r3] \n"
+ " \n"
+ " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, push high vfp registers. */
+ " it eq \n"
+ " vstmdbeq r0!, {s16-s31} \n"
+ " \n"
+ " stmdb r0!, {r4-r11, r14} \n" /* Save the core registers. */
+ " str r0, [r2] \n" /* Save the new top of stack into the first member of the TCB. */
+ " \n"
+ " stmdb sp!, {r0, r3} \n"
+ " mov r0, %0 \n"
+ " msr basepri, r0 \n"
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
+ " ldmia sp!, {r0, r3} \n"
+ " \n"
+ " ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */
+ " ldr r0, [r1] \n"
+ " \n"
+ " ldmia r0!, {r4-r11, r14} \n" /* Pop the core registers. */
+ " \n"
+ " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, pop the high vfp registers too. */
+ " it eq \n"
+ " vldmiaeq r0!, {s16-s31} \n"
+ " \n"
+ " msr psp, r0 \n"
+ " isb \n"
+ " \n"
#ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata workaround. */
#if WORKAROUND_PMU_CM001 == 1
- " push { r14 } \n"
- " pop { pc } \n"
+ " push { r14 } \n"
+ " pop { pc } \n"
#endif
#endif
- " \n"
- " bx r14 \n"
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " \n"
+ " bx r14 \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
@@ -767,13 +785,13 @@
{
__asm volatile
(
- " ldr.w r0, =0xE000ED88 \n"/* The FPU enable bits are in the CPACR. */
- " ldr r1, [r0] \n"
- " \n"
- " orr r1, r1, #( 0xf << 20 ) \n"/* Enable CP10 and CP11 coprocessors, then save back. */
- " str r1, [r0] \n"
- " bx r14 \n"
- " .ltorg \n"
+ " ldr.w r0, =0xE000ED88 \n" /* The FPU enable bits are in the CPACR. */
+ " ldr r1, [r0] \n"
+ " \n"
+ " orr r1, r1, #( 0xf << 20 ) \n" /* Enable CP10 and CP11 coprocessors, then save back. */
+ " str r1, [r0] \n"
+ " bx r14 \n"
+ " .ltorg \n"
);
}
/*-----------------------------------------------------------*/
@@ -806,10 +824,10 @@
* be set to a value equal to or numerically *higher* than
* configMAX_SYSCALL_INTERRUPT_PRIORITY.
*
- * Interrupts that use the FreeRTOS API must not be left at their
- * default priority of zero as that is the highest possible priority,
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
* which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
- * and therefore also guaranteed to be invalid.
+ * and therefore also guaranteed to be invalid.
*
* FreeRTOS maintains separate thread and ISR API functions to ensure
* interrupt entry is as fast and simple as possible.
diff --git a/Source/portable/GCC/ARM_CM4F/portmacro.h b/Source/portable/GCC/ARM_CM4F/portmacro.h
index b85a98b..ec9cfc9 100644
--- a/Source/portable/GCC/ARM_CM4F/portmacro.h
+++ b/Source/portable/GCC/ARM_CM4F/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -30,9 +30,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*-----------------------------------------------------------
* Port specific definitions.
@@ -57,16 +59,21 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
+ #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
- #else
+ #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+ #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS )
+ typedef uint64_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffffffffffULL
+ #else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
@@ -197,10 +204,10 @@
__asm volatile
(
- " mov %0, %1 \n"\
- " msr basepri, %0 \n"\
- " isb \n"\
- " dsb \n"\
+ " mov %0, %1 \n"\
+ " msr basepri, %0 \n"\
+ " isb \n"\
+ " dsb \n"\
: "=r" ( ulNewBASEPRI ) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@@ -213,11 +220,11 @@
__asm volatile
(
- " mrs %0, basepri \n"\
- " mov %1, %2 \n"\
- " msr basepri, %1 \n"\
- " isb \n"\
- " dsb \n"\
+ " mrs %0, basepri \n"\
+ " mov %1, %2 \n"\
+ " msr basepri, %1 \n"\
+ " isb \n"\
+ " dsb \n"\
: "=r" ( ulOriginalBASEPRI ), "=r" ( ulNewBASEPRI ) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
@@ -231,15 +238,17 @@
{
__asm volatile
(
- " msr basepri, %0 "::"r" ( ulNewMaskValue ) : "memory"
+ " msr basepri, %0 "::"r" ( ulNewMaskValue ) : "memory"
);
}
/*-----------------------------------------------------------*/
#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c b/Source/portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..7aa8166
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2105 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+#include "mpu_syscall_numbers.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskDelayUntil ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskAbortDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskPriorityGet ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_eTaskGetState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskGetInfo ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetIdleTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSuspend ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskResume ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ TickType_t MPU_xTaskGetTickCount( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetTickCount ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetNumberOfTasks ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTaskGetName ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetSystemState ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark2 ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetCurrentTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetSchedulerState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetTimeOutState ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskCheckForTimeOut ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotify ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyWait ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyTake ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyStateClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyValueClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGenericSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueMessagesWaiting ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_uxQueueSpacesAvailableImpl; unprivileged callers raise the SVC. */
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to MPU_xQueueReceiveImpl;
+  * unprivileged callers raise SVC #SYSTEM_CALL_xQueueReceive. */
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to MPU_xQueuePeekImpl;
+  * unprivileged callers raise SVC #SYSTEM_CALL_xQueuePeek. */
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueuePeek ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xQueueSemaphoreTakeImpl; unprivileged callers raise the SVC. */
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSemaphoreTake ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xQueueGetMutexHolderImpl; unprivileged callers raise the SVC. */
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGetMutexHolder ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xQueueTakeMutexRecursiveImpl; unprivileged callers raise the SVC. */
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueTakeMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xQueueGiveMutexRecursiveImpl; unprivileged callers raise the SVC. */
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGiveMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xQueueSelectFromSetImpl; unprivileged callers raise the SVC. */
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSelectFromSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to MPU_xQueueAddToSetImpl;
+  * unprivileged callers raise SVC #SYSTEM_CALL_xQueueAddToSet. */
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueAddToSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_vQueueAddToRegistryImpl; unprivileged callers raise the SVC. */
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueAddToRegistry ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_vQueueUnregisterQueueImpl; unprivileged callers raise the SVC. */
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueUnregisterQueue ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ /* Naked MPU wrapper: privileged callers branch to MPU_pcQueueGetNameImpl;
+  * unprivileged callers raise SVC #SYSTEM_CALL_pcQueueGetName. */
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcQueueGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to MPU_pvTimerGetTimerIDImpl;
+  * unprivileged callers raise SVC #SYSTEM_CALL_pvTimerGetTimerID. */
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTimerGetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to MPU_vTimerSetTimerIDImpl;
+  * unprivileged callers raise SVC #SYSTEM_CALL_vTimerSetTimerID. */
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xTimerIsTimerActiveImpl; unprivileged callers raise the SVC. */
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerIsTimerActive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xTimerGetTimerDaemonTaskHandleImpl; unprivileged callers raise the SVC. */
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* Naked MPU wrapper with an extra interrupt check: callers running in an
+  * exception handler (IPSR != 0) or with privilege (CONTROL bit 0 clear)
+  * branch straight to MPU_xTimerGenericCommandPrivImpl; only unprivileged
+  * thread-mode callers raise SVC #SYSTEM_CALL_xTimerGenericCommand. */
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGenericCommand ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to MPU_pcTimerGetNameImpl;
+  * unprivileged callers raise SVC #SYSTEM_CALL_pcTimerGetName. */
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTimerGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_vTimerSetReloadModeImpl; unprivileged callers raise the SVC. */
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xTimerGetReloadModeImpl; unprivileged callers raise the SVC. */
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_uxTimerGetReloadModeImpl; unprivileged callers raise the SVC. */
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to MPU_xTimerGetPeriodImpl;
+  * unprivileged callers raise SVC #SYSTEM_CALL_xTimerGetPeriod. */
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetPeriod ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xTimerGetExpiryTimeImpl; unprivileged callers raise the SVC. */
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetExpiryTime ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: parameters are passed packed in a single struct so one
+  * register carries them all.  Privileged callers branch to
+  * MPU_xEventGroupWaitBitsImpl; unprivileged callers raise the SVC. */
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupWaitBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xEventGroupClearBitsImpl; unprivileged callers raise the SVC. */
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupClearBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xEventGroupSetBitsImpl; unprivileged callers raise the SVC. */
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSetBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to MPU_xEventGroupSyncImpl;
+  * unprivileged callers raise SVC #SYSTEM_CALL_xEventGroupSync. */
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSync ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_uxEventGroupGetNumberImpl; unprivileged callers raise the SVC. */
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxEventGroupGetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_vEventGroupSetNumberImpl; unprivileged callers raise the SVC. */
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vEventGroupSetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to MPU_xStreamBufferSendImpl;
+  * unprivileged callers raise SVC #SYSTEM_CALL_xStreamBufferSend. */
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xStreamBufferReceiveImpl; unprivileged callers raise the SVC. */
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xStreamBufferIsFullImpl; unprivileged callers raise the SVC. */
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsFull ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xStreamBufferIsEmptyImpl; unprivileged callers raise the SVC. */
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsEmpty ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xStreamBufferSpacesAvailableImpl; unprivileged callers raise the SVC. */
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xStreamBufferBytesAvailableImpl; unprivileged callers raise the SVC. */
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferBytesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xStreamBufferSetTriggerLevelImpl; unprivileged callers raise the SVC. */
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSetTriggerLevel ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ /* Naked MPU wrapper: privileged callers branch to
+  * MPU_xStreamBufferNextMessageLengthBytesImpl; unprivileged callers raise the SVC. */
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferNextMessageLengthBytes ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
diff --git a/Source/portable/GCC/ARM_CM4_MPU/port.c b/Source/portable/GCC/ARM_CM4_MPU/port.c
index d99b8b4..575c5ac 100644
--- a/Source/portable/GCC/ARM_CM4_MPU/port.c
+++ b/Source/portable/GCC/ARM_CM4_MPU/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -38,6 +38,7 @@
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "task.h"
+#include "mpu_syscall_numbers.h"
#ifndef __VFP_FP__
#error This port can only be used when the project options are configured to enable hardware floating point support.
@@ -93,8 +94,9 @@
/* Constants required to access and manipulate the SysTick. */
#define portNVIC_SYSTICK_INT ( 0x00000002UL )
#define portNVIC_SYSTICK_ENABLE ( 0x00000001UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
#define portNVIC_SVC_PRI ( ( ( uint32_t ) configMAX_SYSCALL_INTERRUPT_PRIORITY - 1UL ) << 24UL )
/* Constants required to manipulate the VFP. */
@@ -117,13 +119,35 @@
#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
#define portPRIGROUP_SHIFT ( 8UL )
+/* Constants used during system call enter and exit. */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+
/* Offsets in the stack to the parameters when inside the SVC handler. */
+#define portOFFSET_TO_LR ( 5 )
#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+
/* For strict compliance with the Cortex-M spec the task start address should
* have bit-0 clear, as it is loaded into the PC on exit from an ISR. */
#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL )
+/* Does addr lie within [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
/*
* Configure a number of standard MPU regions that are used by all tasks.
*/
@@ -159,7 +183,7 @@
* C portion of the SVC handler. The SVC handler is split between an asm entry
* and a C wrapper for simplicity of coding and maintenance.
*/
-static void prvSVCHandler( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION;
+void vSVCHandler_C( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION;
/*
* Function to enable the VFP.
@@ -186,7 +210,7 @@
/**
* @brief Enter critical section.
*/
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
void vPortEnterCritical( void ) FREERTOS_SYSTEM_CALL;
#else
void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
@@ -195,11 +219,57 @@
/**
* @brief Exit from critical section.
*/
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
void vPortExitCritical( void ) FREERTOS_SYSTEM_CALL;
#else
void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
#endif
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/
/* Each task maintains its own interrupt status in the critical nesting
@@ -207,6 +277,15 @@
* switches can only occur when uxCriticalNesting is zero. */
static UBaseType_t uxCriticalNesting = 0xaaaaaaaa;
+#if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/*
+ * This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
/*
* Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
* FreeRTOS API functions are not called from interrupts that have been assigned
@@ -226,66 +305,123 @@
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged )
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings )
{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = 0; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
-
- /* A save method is being used that requires each task to maintain its
- * own exec return value. */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */
-
if( xRunPrivileged == pdTRUE )
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED;
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED;
}
else
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED;
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED;
+ }
+ xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */
+ xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */
+ xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */
+ xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */
+ xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */
+ xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */
+ xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */
+ xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */
+ xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+
+ xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */
+ xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */
+ xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */
+ xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */
+ xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */
+ xMPUSettings->ulContext[ 16 ] = 0; /* LR. */
+ xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */
+ xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ return &( xMPUSettings->ulContext[ 19 ] );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "blt syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_exit \n"
+ "b vSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ : "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
}
- return pxTopOfStack;
-}
+#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ void vPortSVCHandler( void )
+ {
+ /* Assumes psp was in use. */
+ __asm volatile
+ (
+ #ifndef USE_PROCESS_STACK /* Code should not be required if a main() is using the process stack. */
+ " tst lr, #4 \n"
+ " ite eq \n"
+ " mrseq r0, msp \n"
+ " mrsne r0, psp \n"
+ #else
+ " mrs r0, psp \n"
+ #endif
+ " b %0 \n"
+ ::"i" ( vSVCHandler_C ) : "r0", "memory"
+ );
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
-void vPortSVCHandler( void )
-{
- /* Assumes psp was in use. */
- __asm volatile
- (
- #ifndef USE_PROCESS_STACK /* Code should not be required if a main() is using the process stack. */
- " tst lr, #4 \n"
- " ite eq \n"
- " mrseq r0, msp \n"
- " mrsne r0, psp \n"
- #else
- " mrs r0, psp \n"
- #endif
- " b %0 \n"
- ::"i" ( prvSVCHandler ) : "r0", "memory"
- );
-}
-/*-----------------------------------------------------------*/
-
-static void prvSVCHandler( uint32_t * pulParam )
+void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
{
uint8_t ucSVCNumber;
uint32_t ulPC;
- #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
+ #if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) )
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@@ -295,7 +431,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
- #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ #endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) ) */
/* The stack contains: r0, r1, r2, r3, r12, LR, PC and xPSR. The first
* argument (r0) is pulParam[ 0 ]. */
@@ -321,88 +457,348 @@
break;
- #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
- case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the
- * svc was raised from any of the
- * system calls. */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 1 )
+ #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
+ case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the
+ * svc was raised from any of the
+ * system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- __asm volatile
- (
- " mrs r1, control \n"/* Obtain current control value. */
- " bic r1, #1 \n"/* Set privilege bit. */
- " msr control, r1 \n"/* Write back new control value. */
- ::: "r1", "memory"
- );
- }
-
- break;
- #else /* if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
- case portSVC_RAISE_PRIVILEGE:
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
__asm volatile
(
- " mrs r1, control \n"/* Obtain current control value. */
- " bic r1, #1 \n"/* Set privilege bit. */
- " msr control, r1 \n"/* Write back new control value. */
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Set privilege bit. */
+ " msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
- break;
- #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ }
- default: /* Unknown SVC call. */
- break;
+ break;
+ #else /* if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ case portSVC_RAISE_PRIVILEGE:
+ __asm volatile
+ (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Set privilege bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+ break;
+ #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+ default: /* Unknown SVC call. */
+ break;
}
}
/*-----------------------------------------------------------*/
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile
+ (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Clear nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the privileged code (i.e. application is not
+ * raising SVC directly). This SVC is only raised from
+ * vRequestSystemCallExit which is in the privileged code section.
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+ * this means that we previously entered a system call and the
+ * application is not attempting to exit without entering a system
+ * call.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+ {
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile
+ (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " orr r1, #1 \n" /* Set nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Return to the caller of the System Call entry point (i.e. the
+ * caller of the MPU_<API>). */
+ pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ /* Ensure that LR has a valid value. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+/*-----------------------------------------------------------*/
+
static void prvRestoreContextOfFirstTask( void )
{
__asm volatile
(
- " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"
- " ldr r0, [r0] \n"
- " msr msp, r0 \n"/* Set the msp back to the start of the stack. */
- " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */
- " ldr r1, [r3] \n"
- " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */
- " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */
- " \n"
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
- " \n"
- " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
- " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
- " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
- " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
- #endif /* configTOTAL_MPU_REGIONS == 16. */
- " \n"
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- " \n"
- " ldmia r0!, {r3-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry. */
- " msr control, r3 \n"
- " msr psp, r0 \n"/* Restore the task stack pointer. */
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " bx r14 \n"
- " \n"
- " .ltorg \n"/* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */
- " .align 4 \n"
- "pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n"
+ " ldr r0, [r0] \n"
+ " msr msp, r0 \n" /* Set the msp back to the start of the stack. */
+ " \n"
+ /*------------ Program MPU. ------------ */
+ " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ " str r3, [r0] \n" /* Disable MPU. */
+ " \n"
+ " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+ #endif /* configTOTAL_MPU_REGIONS == 16. */
+ " \n"
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ " str r3, [r0] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ /*---------- Restore Context. ---------- */
+ " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ " msr psp, r0 \n"
+ " stmia r0, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */
+ " msr control, r3 \n"
+ " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
+ " bx lr \n"
+ " \n"
+ " .ltorg \n" /* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB\n"
);
}
/*-----------------------------------------------------------*/
@@ -412,10 +808,6 @@
*/
BaseType_t xPortStartScheduler( void )
{
- /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See
- * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
- configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) );
-
/* Errata 837070 workaround must only be enabled on Cortex-M7 r0p0
* and r0p1 cores. */
#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
@@ -429,66 +821,87 @@
#endif
#if ( configASSERT_DEFINED == 1 )
+ {
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
+ volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine the maximum priority from which ISR safe FreeRTOS API
+ * functions can be called. ISR safe functions are those that end in
+ * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+ * ensure interrupt entry is as fast and simple as possible.
+ *
+ * Save the interrupt priority value that is about to be clobbered. */
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
+
+ /* Determine the number of priority bits available. First write to all
+ * possible bits. */
+ *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = *pucFirstUserPriorityRegister;
+
+ /* Use the same mask on the maximum system call priority. */
+ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+ /* Calculate the maximum acceptable priority group value for the number
+ * of bits read back. */
+
+ while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- volatile uint32_t ulOriginalPriority;
- volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
- volatile uint8_t ucMaxPriorityValue;
-
- /* Determine the maximum priority from which ISR safe FreeRTOS API
- * functions can be called. ISR safe functions are those that end in
- * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
- * ensure interrupt entry is as fast and simple as possible.
- *
- * Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
-
- /* Determine the number of priority bits available. First write to all
- * possible bits. */
- *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
-
- /* Read the value back to see how many bits stuck. */
- ucMaxPriorityValue = *pucFirstUserPriorityRegister;
-
- /* Use the same mask on the maximum system call priority. */
- ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
-
- /* Calculate the maximum acceptable priority group value for the number
- * of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
-
- while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
- {
- ulMaxPRIGROUPValue--;
- ucMaxPriorityValue <<= ( uint8_t ) 0x01;
- }
-
- #ifdef __NVIC_PRIO_BITS
- {
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
- }
- #endif
-
- #ifdef configPRIO_BITS
- {
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
- }
- #endif
-
- /* Shift the priority group value back to its position within the AIRCR
- * register. */
- ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
- ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
-
- /* Restore the clobbered interrupt priority register to its original
- * value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ ulImplementedPrioBits++;
+ ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
+
+ if( ulImplementedPrioBits == 8 )
+ {
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
+ }
+ else
+ {
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+ }
+
+ /* Shift the priority group value back to its position within the AIRCR
+ * register. */
+ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+ ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+ /* Restore the clobbered interrupt priority register to its original
+ * value. */
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
+ }
#endif /* configASSERT_DEFINED */
/* Make PendSV and SysTick the same priority as the kernel, and the SVC
@@ -507,6 +920,12 @@
/* Initialise the critical nesting count ready for the first task. */
uxCriticalNesting = 0;
+ #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+ {
+ xSchedulerRunning = pdTRUE;
+ }
+ #endif
+
/* Ensure the VFP is enabled - it should be anyway. */
vPortEnableVFP();
@@ -517,21 +936,23 @@
* in use in case the FPU was used before the scheduler was started - which
* would otherwise result in the unnecessary leaving of space in the SVC stack
* for lazy saving of FPU registers. */
- __asm volatile (
- " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"
- " ldr r0, [r0] \n"
- " msr msp, r0 \n"/* Set the msp back to the start of the stack. */
- " mov r0, #0 \n"/* Clear the bit that indicates the FPU is in use, see comment above. */
- " msr control, r0 \n"
- " cpsie i \n"/* Globally enable interrupts. */
- " cpsie f \n"
- " dsb \n"
- " isb \n"
- " svc %0 \n"/* System call to start first task. */
- " nop \n"
- " .ltorg \n"
- ::"i" ( portSVC_START_SCHEDULER ) : "memory" );
+ __asm volatile
+ (
+ " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n"
+ " ldr r0, [r0] \n"
+ " msr msp, r0 \n" /* Set the msp back to the start of the stack. */
+ " mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */
+ " msr control, r0 \n"
+ " cpsie i \n" /* Globally enable interrupts. */
+ " cpsie f \n"
+ " dsb \n"
+ " isb \n"
+ " svc %0 \n" /* System call to start first task. */
+ " nop \n"
+ " .ltorg \n"
+ ::"i" ( portSVC_START_SCHEDULER ) : "memory"
+ );
/* Should not get here! */
return 0;
@@ -548,39 +969,63 @@
void vPortEnterCritical( void )
{
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
- if( portIS_PRIVILEGED() == pdFALSE )
- {
- portRAISE_PRIVILEGE();
- portMEMORY_BARRIER();
+ #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+ if( portIS_PRIVILEGED() == pdFALSE )
+ {
+ portRAISE_PRIVILEGE();
+ portMEMORY_BARRIER();
+ portDISABLE_INTERRUPTS();
+ uxCriticalNesting++;
+ portMEMORY_BARRIER();
+
+ portRESET_PRIVILEGE();
+ portMEMORY_BARRIER();
+ }
+ else
+ {
+ portDISABLE_INTERRUPTS();
+ uxCriticalNesting++;
+ }
+ #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
portDISABLE_INTERRUPTS();
uxCriticalNesting++;
- portMEMORY_BARRIER();
-
- portRESET_PRIVILEGE();
- portMEMORY_BARRIER();
- }
- else
- {
- portDISABLE_INTERRUPTS();
- uxCriticalNesting++;
- }
-#else
- portDISABLE_INTERRUPTS();
- uxCriticalNesting++;
-#endif
+ #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
}
/*-----------------------------------------------------------*/
void vPortExitCritical( void )
{
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
- if( portIS_PRIVILEGED() == pdFALSE )
- {
- portRAISE_PRIVILEGE();
- portMEMORY_BARRIER();
+ #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+ if( portIS_PRIVILEGED() == pdFALSE )
+ {
+ portRAISE_PRIVILEGE();
+ portMEMORY_BARRIER();
+ configASSERT( uxCriticalNesting );
+ uxCriticalNesting--;
+
+ if( uxCriticalNesting == 0 )
+ {
+ portENABLE_INTERRUPTS();
+ }
+
+ portMEMORY_BARRIER();
+
+ portRESET_PRIVILEGE();
+ portMEMORY_BARRIER();
+ }
+ else
+ {
+ configASSERT( uxCriticalNesting );
+ uxCriticalNesting--;
+
+ if( uxCriticalNesting == 0 )
+ {
+ portENABLE_INTERRUPTS();
+ }
+ }
+ #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
configASSERT( uxCriticalNesting );
uxCriticalNesting--;
@@ -588,30 +1033,7 @@
{
portENABLE_INTERRUPTS();
}
- portMEMORY_BARRIER();
-
- portRESET_PRIVILEGE();
- portMEMORY_BARRIER();
- }
- else
- {
- configASSERT( uxCriticalNesting );
- uxCriticalNesting--;
-
- if( uxCriticalNesting == 0 )
- {
- portENABLE_INTERRUPTS();
- }
- }
-#else
- configASSERT( uxCriticalNesting );
- uxCriticalNesting--;
-
- if( uxCriticalNesting == 0 )
- {
- portENABLE_INTERRUPTS();
- }
-#endif
+ #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
}
/*-----------------------------------------------------------*/
@@ -621,76 +1043,94 @@
__asm volatile
(
- " mrs r0, psp \n"
- " isb \n"
- " \n"
- " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */
- " ldr r2, [r3] \n"
- " \n"
- " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, push high vfp registers. */
- " it eq \n"
- " vstmdbeq r0!, {s16-s31} \n"
- " \n"
- " mrs r1, control \n"
- " stmdb r0!, {r1, r4-r11, r14} \n"/* Save the remaining registers. */
- " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */
- " \n"
- " stmdb sp!, {r0, r3} \n"
- " mov r0, %0 \n"
- #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- #endif
- " msr basepri, r0 \n"
- " dsb \n"
- " isb \n"
- #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- #endif
- " bl vTaskSwitchContext \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " ldmia sp!, {r0, r3} \n"
- " \n"/* Restore the context. */
- " ldr r1, [r3] \n"
- " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */
- " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */
- " \n"
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
- " \n"
- " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
- " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
- " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
- " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
- #endif /* configTOTAL_MPU_REGIONS == 16. */
- " \n"
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- " \n"
- " ldmia r0!, {r3-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry. */
- " msr control, r3 \n"
- " \n"
- " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, pop the high vfp registers too. */
- " it eq \n"
- " vldmiaeq r0!, {s16-s31} \n"
- " \n"
- " msr psp, r0 \n"
- " bx r14 \n"
- " \n"
- " .ltorg \n"/* Assemble the current literal pool to avoid offset-out-of-bound errors with lto. */
- " .align 4 \n"
- "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " ldr r1, [r2] \n" /* r1 = Location where the context should be saved. */
+ " \n"
+ /*------------ Save Context. ----------- */
+ " mrs r3, control \n"
+ " mrs r0, psp \n"
+ " isb \n"
+ " \n"
+ " add r0, r0, #0x20 \n" /* Move r0 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r0, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r0, r0, #0x20 \n" /* Set r0 back to the location of hardware saved context. */
+ " \n"
+ " stmia r1!, {r3-r11, lr} \n" /* Store CONTROL register, r4-r11 and LR. */
+ " ldmia r0, {r4-r11} \n" /* Copy hardware saved context into r4-r11. */
+ " stmia r1!, {r0, r4-r11} \n" /* Store original PSP (after hardware has saved context) and the hardware saved context. */
+ " str r1, [r2] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ /*---------- Select next task. --------- */
+ " mov r0, %0 \n"
+ #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
+ " cpsid i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+ #endif
+ " msr basepri, r0 \n"
+ " dsb \n"
+ " isb \n"
+ #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
+ " cpsie i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+ #endif
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
+ " \n"
+ /*------------ Program MPU. ------------ */
+ " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ " str r3, [r0] \n" /* Disable MPU. */
+ " \n"
+ " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+ #endif /* configTOTAL_MPU_REGIONS == 16. */
+ " \n"
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ " str r3, [r0] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ /*---------- Restore Context. ---------- */
+ " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ " msr psp, r0 \n"
+ " stmia r0!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */
+ " msr control, r3 \n"
+
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r0!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */
+
+ " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .ltorg \n" /* Assemble the current literal pool to avoid offset-out-of-bound errors with lto. */
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
@@ -734,13 +1174,13 @@
{
__asm volatile
(
- " ldr.w r0, =0xE000ED88 \n"/* The FPU enable bits are in the CPACR. */
- " ldr r1, [r0] \n"
- " \n"
- " orr r1, r1, #( 0xf << 20 ) \n"/* Enable CP10 and CP11 coprocessors, then save back. */
- " str r1, [r0] \n"
- " bx r14 \n"
- " .ltorg \n"
+ " ldr.w r0, =0xE000ED88 \n" /* The FPU enable bits are in the CPACR. */
+ " ldr r1, [r0] \n"
+ " \n"
+ " orr r1, r1, #( 0xf << 20 ) \n" /* Enable CP10 and CP11 coprocessors, then save back. */
+ " str r1, [r0] \n"
+ " bx r14 \n"
+ " .ltorg \n"
);
}
/*-----------------------------------------------------------*/
@@ -748,7 +1188,6 @@
static void prvSetupMPU( void )
{
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@@ -856,14 +1295,14 @@
{
__asm volatile
(
- " mrs r0, control \n"/* r0 = CONTROL. */
- " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
- " ite ne \n"
- " movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
- " moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
- " bx lr \n"/* Return. */
- " \n"
- " .align 4 \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+ " ite ne \n"
+ " movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ " moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ " bx lr \n" /* Return. */
+ " \n"
+ " .align 4 \n"
::: "r0", "memory"
);
}
@@ -873,10 +1312,10 @@
{
__asm volatile
(
- " mrs r0, control \n"/* r0 = CONTROL. */
- " orr r0, #1 \n"/* r0 = r0 | 1. */
- " msr control, r0 \n"/* CONTROL = r0. */
- " bx lr \n"/* Return to the caller. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " orr r0, #1 \n" /* r0 = r0 | 1. */
+ " msr control, r0 \n" /* CONTROL = r0. */
+ " bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@@ -888,7 +1327,6 @@
uint32_t ulStackDepth )
{
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __SRAM_segment_start__;
@@ -921,11 +1359,19 @@
( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) |
( portMPU_REGION_ENABLE );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
+
/* Invalidate user configurable regions. */
for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ )
{
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
}
else
@@ -948,6 +1394,12 @@
( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) |
( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) +
+ ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
}
lIndex = 0;
@@ -968,12 +1420,30 @@
( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) |
( xRegions[ lIndex ].ulParameters ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL );
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
+
+ if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) ||
+ ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
}
else
{
/* Invalidate the region. */
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
lIndex++;
@@ -982,6 +1452,47 @@
}
/*-----------------------------------------------------------*/
+BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+{
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+}
+/*-----------------------------------------------------------*/
+
#if ( configASSERT_DEFINED == 1 )
void vPortValidateInterruptPriority( void )
@@ -1042,3 +1553,98 @@
#endif /* configASSERT_DEFINED */
/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings;
+
+ if( xSchedulerRunning == pdFALSE )
+ {
+ /* Grant access to all the kernel objects before the scheduler
+ * is started. It is necessary because there is no task running
+ * yet and therefore, we cannot use the permissions of any
+ * task. */
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+
+ #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ ( void ) lInternalIndexOfKernelObject;
+
+ /* If Access Control List feature is not used, all the tasks have
+ * access to all the kernel objects. */
+ return pdTRUE;
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM4_MPU/portmacro.h b/Source/portable/GCC/ARM_CM4_MPU/portmacro.h
index 0e7dd18..e76c687 100644
--- a/Source/portable/GCC/ARM_CM4_MPU/portmacro.h
+++ b/Source/portable/GCC/ARM_CM4_MPU/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -60,16 +60,18 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
-#if ( configUSE_16_BIT_TICKS == 1 )
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
-#else
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
@@ -191,9 +193,51 @@
uint32_t ulRegionAttribute;
} xMPU_REGION_REGISTERS;
+typedef struct MPU_REGION_SETTINGS
+{
+ uint32_t ulRegionStartAddress;
+ uint32_t ulRegionEndAddress;
+ uint32_t ulRegionPermissions;
+} xMPU_REGION_SETTINGS;
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+#define MAX_CONTEXT_SIZE ( 52 )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+#define portACL_ENTRY_SIZE_BITS ( 32U )
+
+/* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+#define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+#define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
typedef struct MPU_SETTINGS
{
xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
} xMPU_SETTINGS;
/* Architecture specifics. */
@@ -204,13 +248,14 @@
/*-----------------------------------------------------------*/
/* SVC numbers for various services. */
-#define portSVC_START_SCHEDULER 0
-#define portSVC_YIELD 1
-#define portSVC_RAISE_PRIVILEGE 2
+#define portSVC_START_SCHEDULER 100
+#define portSVC_YIELD 101
+#define portSVC_RAISE_PRIVILEGE 102
+#define portSVC_SYSTEM_CALL_EXIT 103
/* Scheduler utilities. */
-#define portYIELD() __asm volatile ( " SVC %0 \n"::"i" ( portSVC_YIELD ) : "memory" )
+#define portYIELD() __asm volatile ( " SVC %0 \n"::"i" ( portSVC_YIELD ) : "memory" )
#define portYIELD_WITHIN_API() \
{ \
/* Set a PendSV to request a context switch. */ \
@@ -318,6 +363,16 @@
#define portRESET_PRIVILEGE() vResetPrivilege()
/*-----------------------------------------------------------*/
+extern BaseType_t xPortIsTaskPrivileged( void );
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+#define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+/*-----------------------------------------------------------*/
+
portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
@@ -346,15 +401,15 @@
__asm volatile
(
- " mov %0, %1 \n"
+ " mov %0, %1 \n"
#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+ " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
#endif
- " msr basepri, %0 \n"
- " isb \n"
- " dsb \n"
+ " msr basepri, %0 \n"
+ " isb \n"
+ " dsb \n"
#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+ " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
#endif
: "=r" ( ulNewBASEPRI ) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
@@ -368,16 +423,16 @@
__asm volatile
(
- " mrs %0, basepri \n"
- " mov %1, %2 \n"
+ " mrs %0, basepri \n"
+ " mov %1, %2 \n"
#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+ " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
#endif
- " msr basepri, %1 \n"
- " isb \n"
- " dsb \n"
+ " msr basepri, %1 \n"
+ " isb \n"
+ " dsb \n"
#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+ " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
#endif
: "=r" ( ulOriginalBASEPRI ), "=r" ( ulNewBASEPRI ) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
@@ -392,7 +447,7 @@
{
__asm volatile
(
- " msr basepri, %0 "::"r" ( ulNewMaskValue ) : "memory"
+ " msr basepri, %0 "::"r" ( ulNewMaskValue ) : "memory"
);
}
/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c b/Source/portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..d247c92
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2106 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+#include "mpu_syscall_numbers.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskDelayUntil ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskAbortDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskPriorityGet ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_eTaskGetState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskGetInfo ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetIdleTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSuspend ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskResume ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ TickType_t MPU_xTaskGetTickCount( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetTickCount ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetNumberOfTasks ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTaskGetName ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetSystemState ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark2 ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetCurrentTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetSchedulerState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetTimeOutState ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskCheckForTimeOut ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotify ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyWait ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyTake ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyStateClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyValueClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGenericSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueMessagesWaiting ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueuePeek ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSemaphoreTake ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGetMutexHolder ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueTakeMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGiveMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSelectFromSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueAddToSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueAddToRegistry ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueUnregisterQueue ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcQueueGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTimerGetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerIsTimerActive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGenericCommand ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTimerGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetPeriod ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetExpiryTime ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupWaitBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupClearBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSetBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSync ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxEventGroupGetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vEventGroupSetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsFull ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsEmpty ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferBytesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSetTriggerLevel ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferNextMessageLengthBytes ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/Source/portable/GCC/ARM_CM55/non_secure/port.c b/Source/portable/GCC/ARM_CM55/non_secure/port.c
new file mode 100644
index 0000000..9712ac3
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/non_secure/port.c
@@ -0,0 +1,2043 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+/* MPU includes. */
+#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
+
+/* Portasm includes. */
+#include "portasm.h"
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ /* Secure components includes. */
+ #include "secure_context.h"
+ #include "secure_init.h"
+#endif /* configENABLE_TRUSTZONE */
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/**
+ * The FreeRTOS Cortex M55 port can be configured to run on the Secure Side only
+ * i.e. the processor boots as secure and never jumps to the non-secure side.
+ * The Trust Zone support in the port must be disabled in order to run FreeRTOS
+ * on the secure side. The following are the valid configuration settings:
+ *
+ * 1. Run FreeRTOS on the Secure Side:
+ * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
+ *
+ * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
+ *
+ * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
+ */
+#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
+ #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the NVIC.
+ */
+#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
+#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
+#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
+#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
+#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
+#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
+#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
+#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
+#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
+#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
+#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 )
+#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
+#define portCPACR_CP10_VALUE ( 3UL )
+#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE
+#define portCPACR_CP10_POS ( 20UL )
+#define portCPACR_CP11_POS ( 22UL )
+
+#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define portFPCCR_ASPEN_POS ( 31UL )
+#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS )
+#define portFPCCR_LSPEN_POS ( 30UL )
+#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the MPU.
+ */
+#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
+#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
+#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
+
+#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
+#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
+
+#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) )
+#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) )
+
+#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) )
+#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) )
+
+#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) )
+#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) )
+
+#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) )
+#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) )
+
+#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
+#define portMPU_MAIR_ATTR0_POS ( 0UL )
+#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR1_POS ( 8UL )
+#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR2_POS ( 16UL )
+#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR3_POS ( 24UL )
+#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 )
+
+#define portMPU_MAIR_ATTR4_POS ( 0UL )
+#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR5_POS ( 8UL )
+#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR6_POS ( 16UL )
+#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR7_POS ( 24UL )
+#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 )
+
+#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL )
+
+#define portMPU_RLAR_REGION_ENABLE ( 1UL )
+
+/* Enable privileged access to unmapped region. */
+#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL )
+
+/* Enable MPU. */
+#define portMPU_ENABLE_BIT ( 1UL << 0UL )
+
+/* Expected value of the portMPU_TYPE register. */
+#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The maximum 24-bit number.
+ *
+ * It is needed because the systick is a 24-bit counter.
+ */
+#define portMAX_24_BIT_NUMBER ( 0xffffffUL )
+
+/**
+ * @brief A fiddle factor to estimate the number of SysTick counts that would
+ * have occurred while the SysTick counter is stopped during tickless idle
+ * calculations.
+ */
+#define portMISSED_COUNTS_FACTOR ( 94UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to set up the initial stack.
+ */
+#define portINITIAL_XPSR ( 0x01000000 )
+
+#if ( configRUN_FREERTOS_SECURE_ONLY == 1 )
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF FD
+ * 1111 1111 1111 1111 1111 1111 1111 1101
+ *
+ * Bit[6] - 1 --> The exception was taken from the Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 1 --> The exception was taken to the Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xfffffffd )
+#else
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF BC
+ * 1111 1111 1111 1111 1111 1111 1011 1100
+ *
+ * Bit[6] - 0 --> The exception was taken from the Non-Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 0 --> The exception was taken to the Non-Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xffffffbc )
+#endif /* configRUN_FREERTOS_SECURE_ONLY */
+
+/**
+ * @brief CONTROL register privileged bit mask.
+ *
+ * Bit[0] in CONTROL register tells the privilege:
+ * Bit[0] = 0 ==> The task is privileged.
+ * Bit[0] = 1 ==> The task is not privileged.
+ */
+#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL )
+
+/**
+ * @brief Initial CONTROL register values.
+ */
+#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 )
+#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 )
+
+/**
+ * @brief Let the user override the default SysTick clock rate. If defined by the
+ * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
+ * configuration register.
+ */
+#ifndef configSYSTICK_CLOCK_HZ
+ #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ )
+ /* Ensure the SysTick is clocked at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT )
+#else
+ /* Select the option to clock SysTick not at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 )
+#endif
+
+/**
+ * @brief Let the user override the pre-loading of the initial LR with the
+ * address of prvTaskExitError() in case it messes up unwinding of the stack
+ * in the debugger.
+ */
+#ifdef configTASK_RETURN_ADDRESS
+ #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
+#else
+ #define portTASK_RETURN_ADDRESS prvTaskExitError
+#endif
+
+/**
+ * @brief If portPRELOAD_REGISTERS then registers will be given an initial value
+ * when a task is created. This helps in debugging at the cost of code size.
+ */
+#define portPRELOAD_REGISTERS 1
+
+/**
+ * @brief A task is created without a secure context, and must call
+ * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes
+ * any secure calls.
+ */
+#define portNO_SECURE_CONTEXT 0
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Used to catch tasks that attempt to return from their implementing
+ * function.
+ */
+static void prvTaskExitError( void );
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Setup the Memory Protection Unit (MPU).
+ */
+ static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_FPU == 1 )
+
+/**
+ * @brief Setup the Floating Point Unit (FPU).
+ */
+ static void prvSetupFPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_FPU */
+
+/**
+ * @brief Setup the timer to generate the tick interrupts.
+ *
+ * The implementation in this file is weak to allow application writers to
+ * change the timer used to generate the tick interrupt.
+ */
+void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether the current execution context is interrupt.
+ *
+ * @return pdTRUE if the current execution context is interrupt, pdFALSE
+ * otherwise.
+ */
+BaseType_t xPortIsInsideInterrupt( void );
+
+/**
+ * @brief Yield the processor.
+ */
+void vPortYield( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enter critical section.
+ */
+void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Exit from critical section.
+ */
+void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SysTick handler.
+ */
+void SysTick_Handler( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief C part of SVC handler.
+ */
+portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
+/**
+ * @brief Each task maintains its own interrupt status in the critical nesting
+ * variable.
+ */
+PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Saved as part of the task context to indicate which context the
+ * task is using on the secure side.
+ */
+ PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
+#endif /* configENABLE_TRUSTZONE */
+
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+
+/**
+ * @brief The number of SysTick increments that make up one tick period.
+ */
+ PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;
+
+/**
+ * @brief The maximum number of tick periods that can be suppressed is
+ * limited by the 24 bit resolution of the SysTick timer.
+ */
+ PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;
+
+/**
+ * @brief Compensate for the CPU cycles that pass while the SysTick is
+ * stopped (low power functionality only).
+ */
+ PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+ __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
+ {
+ uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
+ TickType_t xModifiableIdleTime;
+
+ /* Make sure the SysTick reload value does not overflow the counter. */
+ if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
+ {
+ xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
+ }
+
+ /* Enter a critical section but don't use the taskENTER_CRITICAL()
+ * method as that will mask interrupts that should exit sleep mode. */
+ __asm volatile ( "cpsid i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* If a context switch is pending or a task is waiting for the scheduler
+ * to be unsuspended then abandon the low power entry. */
+ if( eTaskConfirmSleepModeStatus() == eAbortSleep )
+ {
+ /* Re-enable interrupts - see comments above the cpsid instruction
+ * above. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ }
+ else
+ {
+ /* Stop the SysTick momentarily. The time the SysTick is stopped for
+ * is accounted for as best it can be, but using the tickless mode will
+ * inevitably result in some tiny drift of the time maintained by the
+ * kernel with respect to calendar time. */
+ portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+ /* Use the SysTick current-value register to determine the number of
+ * SysTick decrements remaining until the next tick interrupt. If the
+ * current-value register is zero, then there are actually
+ * ulTimerCountsForOneTick decrements remaining, not zero, because the
+ * SysTick requests the interrupt when decrementing from 1 to 0. */
+ ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+
+ if( ulSysTickDecrementsLeft == 0 )
+ {
+ ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
+ }
+
+ /* Calculate the reload value required to wait xExpectedIdleTime
+ * tick periods. -1 is used because this code normally executes part
+ * way through the first tick period. But if the SysTick IRQ is now
+ * pending, then clear the IRQ, suppressing the first tick, and correct
+ * the reload value to reflect that the second tick period is already
+ * underway. The expected idle time is always at least two ticks. */
+ ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
+
+ if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
+ {
+ portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
+ ulReloadValue -= ulTimerCountsForOneTick;
+ }
+
+ if( ulReloadValue > ulStoppedTimerCompensation )
+ {
+ ulReloadValue -= ulStoppedTimerCompensation;
+ }
+
+ /* Set the new reload value. */
+ portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
+
+ /* Clear the SysTick count flag and set the count value back to
+ * zero. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+ /* Restart SysTick. */
+ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
+
+ /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
+ * set its parameter to 0 to indicate that its implementation contains
+ * its own wait for interrupt or wait for event instruction, and so wfi
+ * should not be executed again. However, the original expected idle
+ * time variable must remain unmodified, so a copy is taken. */
+ xModifiableIdleTime = xExpectedIdleTime;
+ configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
+
+ if( xModifiableIdleTime > 0 )
+ {
+ __asm volatile ( "dsb" ::: "memory" );
+ __asm volatile ( "wfi" );
+ __asm volatile ( "isb" );
+ }
+
+ configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
+
+ /* Re-enable interrupts to allow the interrupt that brought the MCU
+ * out of sleep mode to execute immediately. See comments above
+ * the cpsid instruction above. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* Disable interrupts again because the clock is about to be stopped
+ * and interrupts that execute while the clock is stopped will increase
+ * any slippage between the time maintained by the RTOS and calendar
+ * time. */
+ __asm volatile ( "cpsid i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* Disable the SysTick clock without reading the
+ * portNVIC_SYSTICK_CTRL_REG register to ensure the
+ * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
+ * the time the SysTick is stopped for is accounted for as best it can
+ * be, but using the tickless mode will inevitably result in some tiny
+ * drift of the time maintained by the kernel with respect to calendar
+ * time*/
+ portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+ /* Determine whether the SysTick has already counted to zero. */
+ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+ {
+ uint32_t ulCalculatedLoadValue;
+
+ /* The tick interrupt ended the sleep (or is now pending), and
+ * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG
+ * with whatever remains of the new tick period. */
+ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
+
+ /* Don't allow a tiny value, or values that have somehow
+ * underflowed because the post sleep hook did something
+ * that took too long or because the SysTick current-value register
+ * is zero. */
+ if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
+ {
+ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
+ }
+
+ portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
+
+ /* As the pending tick will be processed as soon as this
+ * function exits, the tick value maintained by the tick is stepped
+ * forward by one less than the time spent waiting. */
+ ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
+ }
+ else
+ {
+ /* Something other than the tick interrupt ended the sleep. */
+
+ /* Use the SysTick current-value register to determine the
+ * number of SysTick decrements remaining until the expected idle
+ * time would have ended. */
+ ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+ #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
+ {
+ /* If the SysTick is not using the core clock, the current-
+ * value register might still be zero here. In that case, the
+ * SysTick didn't load from the reload register, and there are
+ * ulReloadValue decrements remaining in the expected idle
+ * time, not zero. */
+ if( ulSysTickDecrementsLeft == 0 )
+ {
+ ulSysTickDecrementsLeft = ulReloadValue;
+ }
+ }
+ #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+ /* Work out how long the sleep lasted rounded to complete tick
+ * periods (not the ulReload value which accounted for part
+ * ticks). */
+ ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
+
+ /* How many complete tick periods passed while the processor
+ * was waiting? */
+ ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
+
+ /* The reload value is set to whatever fraction of a single tick
+ * period remains. */
+ portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
+ }
+
+ /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
+ * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If
+ * the SysTick is not using the core clock, temporarily configure it to
+ * use the core clock. This configuration forces the SysTick to load
+ * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
+ * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready
+ * to receive the standard value immediately. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+ #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
+ {
+ portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+ }
+ #else
+ {
+ /* The temporary usage of the core clock has served its purpose,
+ * as described above. Resume usage of the other clock. */
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;
+
+ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+ {
+ /* The partial tick period already ended. Be sure the SysTick
+ * counts it only once. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
+ }
+
+ portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+ }
+ #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+ /* Step the tick to account for any tick periods that elapsed. */
+ vTaskStepTick( ulCompleteTickPeriods );
+
+ /* Exit with interrupts enabled. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ }
+ }
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
+{
+ /* Calculate the constants required to configure the tick interrupt. */
+ #if ( configUSE_TICKLESS_IDLE == 1 )
+ {
+ ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
+ xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
+ ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
+ }
+ #endif /* configUSE_TICKLESS_IDLE */
+
+ /* Stop and reset the SysTick. */
+ portNVIC_SYSTICK_CTRL_REG = 0UL;
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+ /* Configure SysTick to interrupt at the requested rate. */
+ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+}
+/*-----------------------------------------------------------*/
+
+static void prvTaskExitError( void )
+{
+ volatile uint32_t ulDummy = 0UL;
+
+ /* A function that implements a task must not exit or attempt to return to
+ * its caller as there is nothing to return to. If a task wants to exit it
+ * should instead call vTaskDelete( NULL ). Artificially force an assert()
+ * to be triggered if configASSERT() is defined, then stop here so
+ * application writers can catch the error. */
+ configASSERT( ulCriticalNesting == ~0UL );
+ portDISABLE_INTERRUPTS();
+
+ while( ulDummy == 0 )
+ {
+ /* This file calls prvTaskExitError() after the scheduler has been
+ * started to remove a compiler warning about the function being
+ * defined but never called. ulDummy is used purely to quieten other
+ * warnings about code appearing after this function is called - making
+ * ulDummy volatile makes the compiler think the function could return
+ * and therefore not output an 'unreachable code' warning for code that
+ * appears after it. */
+ }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+ static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
+ {
+ #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of being
+             * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ extern uint32_t * __unprivileged_flash_start__;
+ extern uint32_t * __unprivileged_flash_end__;
+ extern uint32_t * __privileged_sram_start__;
+ extern uint32_t * __privileged_sram_end__;
+ #else /* if defined( __ARMCC_VERSION ) */
+            /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ extern uint32_t __unprivileged_flash_start__[];
+ extern uint32_t __unprivileged_flash_end__[];
+ extern uint32_t __privileged_sram_start__[];
+ extern uint32_t __privileged_sram_end__[];
+ #endif /* defined( __ARMCC_VERSION ) */
+
+ /* The only permitted number of regions are 8 or 16. */
+ configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) );
+
+ /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */
+ configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );
+
+ /* Check that the MPU is present. */
+ if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
+ {
+ /* MAIR0 - Index 0. */
+ portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+ /* MAIR0 - Index 1. */
+ portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+ /* Setup privileged flash as Read Only so that privileged tasks can
+ * read it but not modify. */
+ portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION;
+ portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+ ( portMPU_REGION_NON_SHAREABLE ) |
+ ( portMPU_REGION_PRIVILEGED_READ_ONLY );
+ portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+ ( portMPU_RLAR_ATTR_INDEX0 ) |
+ ( portMPU_RLAR_REGION_ENABLE );
+
+ /* Setup unprivileged flash as Read Only by both privileged and
+ * unprivileged tasks. All tasks can read it but no-one can modify. */
+ portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION;
+ portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+ ( portMPU_REGION_NON_SHAREABLE ) |
+ ( portMPU_REGION_READ_ONLY );
+ portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+ ( portMPU_RLAR_ATTR_INDEX0 ) |
+ ( portMPU_RLAR_REGION_ENABLE );
+
+ /* Setup unprivileged syscalls flash as Read Only by both privileged
+ * and unprivileged tasks. All tasks can read it but no-one can modify. */
+ portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION;
+ portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+ ( portMPU_REGION_NON_SHAREABLE ) |
+ ( portMPU_REGION_READ_ONLY );
+ portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+ ( portMPU_RLAR_ATTR_INDEX0 ) |
+ ( portMPU_RLAR_REGION_ENABLE );
+
+ /* Setup RAM containing kernel data for privileged access only. */
+ portMPU_RNR_REG = portPRIVILEGED_RAM_REGION;
+ portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+ ( portMPU_REGION_NON_SHAREABLE ) |
+ ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
+ ( portMPU_REGION_EXECUTE_NEVER );
+ portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+ ( portMPU_RLAR_ATTR_INDEX0 ) |
+ ( portMPU_RLAR_REGION_ENABLE );
+
+ /* Enable mem fault. */
+ portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT;
+
+ /* Enable MPU with privileged background access i.e. unmapped
+ * regions have privileged access. */
+ portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
+ }
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_FPU == 1 )
+
+/* Enable the Floating Point Unit for both privileged and unprivileged
+ * code, and enable automatic plus lazy stacking of the floating point
+ * context on exception entry/exit.  When TrustZone is enabled, non-secure
+ * access to the FPU is enabled first via the secure side. */
+    static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */
+    {
+        #if ( configENABLE_TRUSTZONE == 1 )
+        {
+            /* Enable non-secure access to the FPU. */
+            SecureInit_EnableNSFPUAccess();
+        }
+        #endif /* configENABLE_TRUSTZONE */
+
+        /* CP10 = 11 ==> Full access to FPU i.e. both privileged and
+         * unprivileged code should be able to access FPU. CP11 should be
+         * programmed to the same value as CP10. */
+        *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) |
+                            ( portCPACR_CP11_VALUE << portCPACR_CP11_POS )
+                            );
+
+        /* ASPEN = 1 ==> Hardware should automatically preserve floating point
+         * context on exception entry and restore on exception return.
+         * LSPEN = 1 ==> Enable lazy context save of FP state. */
+        *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK );
+    }
+#endif /* configENABLE_FPU */
+/*-----------------------------------------------------------*/
+
+/* Request a context switch by pending the PendSV exception.  The actual
+ * switch is performed by the PendSV handler, which runs at the lowest
+ * interrupt priority. */
+void vPortYield( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Set a PendSV to request a context switch. */
+    portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+
+    /* Barriers are normally not required but do ensure the code is
+     * completely within the specified behaviour for the architecture. */
+    __asm volatile ( "dsb" ::: "memory" );
+    __asm volatile ( "isb" );
+}
+/*-----------------------------------------------------------*/
+
+/* Enter a critical section.  Interrupts are masked BEFORE the nesting
+ * count is incremented so the increment itself cannot be pre-empted.
+ * Critical sections may nest - each call must be balanced by a call to
+ * vPortExitCritical(). */
+void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+    portDISABLE_INTERRUPTS();
+    ulCriticalNesting++;
+
+    /* Barriers are normally not required but do ensure the code is
+     * completely within the specified behaviour for the architecture. */
+    __asm volatile ( "dsb" ::: "memory" );
+    __asm volatile ( "isb" );
+}
+/*-----------------------------------------------------------*/
+
+/* Exit a critical section.  Interrupts are re-enabled only when the
+ * nesting count drops back to zero, i.e. at the outermost exit.  The
+ * assert catches an exit without a matching vPortEnterCritical(). */
+void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+    configASSERT( ulCriticalNesting );
+    ulCriticalNesting--;
+
+    if( ulCriticalNesting == 0 )
+    {
+        portENABLE_INTERRUPTS();
+    }
+}
+/*-----------------------------------------------------------*/
+
+/* SysTick interrupt handler.  Advances the RTOS tick while interrupts up
+ * to the max syscall priority are masked, and pends a PendSV context
+ * switch if xTaskIncrementTick() reports that one is required. */
+void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
+{
+    uint32_t ulPreviousMask;
+
+    ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();
+    {
+        /* Increment the RTOS tick. */
+        if( xTaskIncrementTick() != pdFALSE )
+        {
+            /* Pend a context switch. */
+            portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+        }
+    }
+    portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
+}
+/*-----------------------------------------------------------*/
+
+/* C portion of the SVC handler.  pulCallerStackAddress points at the
+ * exception stack frame of the code that raised the SVC.  The SVC number
+ * is read from the SVC instruction itself (two bytes before the stacked
+ * return address) and dispatched: secure context allocation/free when
+ * TrustZone is enabled, starting the scheduler, or raising privilege for
+ * MPU wrappers v1 system calls. */
+void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
+{
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of
+             * being exported from linker scripts. */
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+        #else
+            /* Declaration when these variables are exported from linker scripts. */
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+    #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+    uint32_t ulPC;
+
+    #if ( configENABLE_TRUSTZONE == 1 )
+        uint32_t ulR0, ulR1;
+        extern TaskHandle_t pxCurrentTCB;
+        #if ( configENABLE_MPU == 1 )
+            uint32_t ulControl, ulIsTaskPrivileged;
+        #endif /* configENABLE_MPU */
+    #endif /* configENABLE_TRUSTZONE */
+    uint8_t ucSVCNumber;
+
+    /* Registers are stored on the stack in the following order - R0, R1, R2, R3,
+     * R12, LR, PC, xPSR.  The SVC number is encoded in the low byte of the
+     * SVC instruction, which sits two bytes before the stacked return PC. */
+    ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
+    ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
+
+    switch( ucSVCNumber )
+    {
+        #if ( configENABLE_TRUSTZONE == 1 )
+            case portSVC_ALLOCATE_SECURE_CONTEXT:
+
+                /* R0 contains the stack size passed as parameter to the
+                 * vPortAllocateSecureContext function. */
+                ulR0 = pulCallerStackAddress[ 0 ];
+
+                #if ( configENABLE_MPU == 1 )
+                {
+                    /* Read the CONTROL register value. */
+                    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
+
+                    /* The task that raised the SVC is privileged if Bit[0]
+                     * in the CONTROL register is 0. */
+                    ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 );
+
+                    /* Allocate and load a context for the secure task. */
+                    xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB );
+                }
+                #else /* if ( configENABLE_MPU == 1 ) */
+                {
+                    /* Allocate and load a context for the secure task. */
+                    xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB );
+                }
+                #endif /* configENABLE_MPU */
+
+                configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID );
+                SecureContext_LoadContext( xSecureContext, pxCurrentTCB );
+                break;
+
+            case portSVC_FREE_SECURE_CONTEXT:
+
+                /* R0 contains TCB being freed and R1 contains the secure
+                 * context handle to be freed. */
+                ulR0 = pulCallerStackAddress[ 0 ];
+                ulR1 = pulCallerStackAddress[ 1 ];
+
+                /* Free the secure context. */
+                SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 );
+                break;
+        #endif /* configENABLE_TRUSTZONE */
+
+        case portSVC_START_SCHEDULER:
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                /* De-prioritize the non-secure exceptions so that the
+                 * non-secure pendSV runs at the lowest priority. */
+                SecureInit_DePrioritizeNSExceptions();
+
+                /* Initialize the secure context management system. */
+                SecureContext_Init();
+            }
+            #endif /* configENABLE_TRUSTZONE */
+
+            #if ( configENABLE_FPU == 1 )
+            {
+                /* Setup the Floating Point Unit (FPU). */
+                prvSetupFPU();
+            }
+            #endif /* configENABLE_FPU */
+
+            /* Setup the context of the first task so that the first task starts
+             * executing. */
+            vRestoreContextOfFirstTask();
+            break;
+
+        #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+            case portSVC_RAISE_PRIVILEGE:
+
+                /* Only raise the privilege, if the svc was raised from any of
+                 * the system calls. */
+                if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+                    ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+                {
+                    vRaisePrivilege();
+                }
+                break;
+        #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+        default:
+            /* Incorrect SVC call. */
+            configASSERT( pdFALSE );
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* Enter a system call (MPU wrappers v2).  Called from the SVC handler.
+ * After validating that the SVC was raised from the system call flash
+ * section, that no system call is already in progress, and that the
+ * requested kernel API is enabled, the exception stack frame is copied
+ * from the task stack to the task's dedicated system call stack, the
+ * stacked PC is redirected to the system call implementation, the stacked
+ * LR is pointed at vRequestSystemCallExit() (so privilege is dropped on
+ * return), and privilege is raised for the duration of the call. */
+    void vSystemCallEnter( uint32_t * pulTaskStack,
+                           uint32_t ulLR,
+                           uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+    {
+        extern TaskHandle_t pxCurrentTCB;
+        extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+        xMPU_SETTINGS * pxMpuSettings;
+        uint32_t * pulSystemCallStack;
+        uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of
+             * being exported from linker scripts. */
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+        #else
+            /* Declaration when these variables are exported from linker scripts. */
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+        #endif /* #if defined( __ARMCC_VERSION ) */
+
+        ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+        /* Checks:
+         * 1. SVC is raised from the system call section (i.e. application is
+         *    not raising SVC directly).
+         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+         *    it is non-NULL only during the execution of a system call (i.e.
+         *    between system call enter and exit).
+         * 3. System call is not for a kernel API disabled by the configuration
+         *    in FreeRTOSConfig.h.
+         * 4. We do not need to check that ucSystemCallNumber is within range
+         *    because the assembly SVC handler checks that before calling
+         *    this function.
+         */
+        if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+            ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+            ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+        {
+            pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+            {
+                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+                {
+                    /* Extended frame i.e. FPU in use. */
+                    ulStackFrameSize = 26;
+                    __asm volatile
+                    (
+                        " vpush {s0}         \n" /* Trigger lazy stacking. */
+                        " vpop  {s0}         \n" /* Nullify the effect of the above instruction. */
+                        ::: "memory"
+                    );
+                }
+                else
+                {
+                    /* Standard frame i.e. FPU not in use. */
+                    ulStackFrameSize = 8;
+                }
+            }
+            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+            {
+                ulStackFrameSize = 8;
+            }
+            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+            /* Make space on the system call stack for the stack frame. */
+            pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+            /* Copy the stack frame. */
+            for( i = 0; i < ulStackFrameSize; i++ )
+            {
+                pulSystemCallStack[ i ] = pulTaskStack[ i ];
+            }
+
+            /* Store the value of the Link Register before the SVC was raised.
+             * It contains the address of the caller of the System Call entry
+             * point (i.e. the caller of the MPU_<API>). We need to restore it
+             * when we exit from the system call. */
+            pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+            /* Store the value of the PSPLIM register before the SVC was raised.
+             * We need to restore it when we exit from the system call. */
+            __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+            /* Use the pulSystemCallStack in thread mode. */
+            __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+            /* Start executing the system call upon returning from this handler. */
+            pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+            /* Raise a request to exit from the system call upon finishing the
+             * system call. */
+            pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+            /* Remember the location where we should copy the stack frame when we exit from
+             * the system call. */
+            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+            {
+                pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+            }
+            else
+            {
+                pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+            }
+
+            /* We ensure in pxPortInitialiseStack that the system call stack is
+             * double word aligned and therefore, there is no need of padding.
+             * Clear the bit[9] of stacked xPSR. */
+            pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+            /* Raise the privilege for the duration of the system call. */
+            __asm volatile
+            (
+                " mrs r0, control     \n" /* Obtain current control value. */
+                " movs r1, #1         \n" /* r1 = 1. */
+                " bics r0, r1         \n" /* Clear nPRIV bit. */
+                " msr control, r0     \n" /* Write back new control value. */
+                ::: "r0", "r1", "memory"
+            );
+        }
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* Raise the SVC that requests exit from a system call.  This function is
+ * never called directly - vSystemCallEnter() plants its address in the
+ * stacked LR so it runs when the system call implementation returns.  It
+ * must remain naked: it has no stack frame of its own. */
+    void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+    {
+        __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* Exit a system call (MPU wrappers v2).  Called from the SVC handler when
+ * vRequestSystemCallExit() raises portSVC_SYSTEM_CALL_EXIT.  After
+ * validating that the SVC came from privileged code and that a system
+ * call is actually in progress, the exception stack frame is copied back
+ * from the system call stack to the task stack, the saved LR/PSPLIM are
+ * restored, the stacked xPSR padding bit is reinstated if needed, and
+ * privilege is dropped before returning to thread mode. */
+    void vSystemCallExit( uint32_t * pulSystemCallStack,
+                          uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+    {
+        extern TaskHandle_t pxCurrentTCB;
+        xMPU_SETTINGS * pxMpuSettings;
+        uint32_t * pulTaskStack;
+        uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of
+             * being exported from linker scripts. */
+            extern uint32_t * __privileged_functions_start__;
+            extern uint32_t * __privileged_functions_end__;
+        #else
+            /* Declaration when these variables are exported from linker scripts. */
+            extern uint32_t __privileged_functions_start__[];
+            extern uint32_t __privileged_functions_end__[];
+        #endif /* #if defined( __ARMCC_VERSION ) */
+
+        ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+        /* Checks:
+         * 1. SVC is raised from the privileged code (i.e. application is not
+         *    raising SVC directly). This SVC is only raised from
+         *    vRequestSystemCallExit which is in the privileged code section.
+         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+         *    this means that we previously entered a system call and the
+         *    application is not attempting to exit without entering a system
+         *    call.
+         */
+        if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+            ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+        {
+            pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+            {
+                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+                {
+                    /* Extended frame i.e. FPU in use. */
+                    ulStackFrameSize = 26;
+                    __asm volatile
+                    (
+                        " vpush {s0}         \n" /* Trigger lazy stacking. */
+                        " vpop  {s0}         \n" /* Nullify the effect of the above instruction. */
+                        ::: "memory"
+                    );
+                }
+                else
+                {
+                    /* Standard frame i.e. FPU not in use. */
+                    ulStackFrameSize = 8;
+                }
+            }
+            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+            {
+                ulStackFrameSize = 8;
+            }
+            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+            /* Make space on the task stack for the stack frame. */
+            pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+            /* Copy the stack frame. */
+            for( i = 0; i < ulStackFrameSize; i++ )
+            {
+                pulTaskStack[ i ] = pulSystemCallStack[ i ];
+            }
+
+            /* Use the pulTaskStack in thread mode. */
+            __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+            /* Return to the caller of the System Call entry point (i.e. the
+             * caller of the MPU_<API>). */
+            pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+            /* Ensure that LR has a valid value.*/
+            pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+            /* Restore the PSPLIM register to what it was at the time of
+             * system call entry. */
+            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+            /* If the hardware used padding to force the stack pointer
+             * to be double word aligned, set the stacked xPSR bit[9],
+             * otherwise clear it. */
+            if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+            }
+            else
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+            }
+
+            /* This is not NULL only for the duration of the system call. */
+            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+            /* Drop the privilege before returning to the thread mode. */
+            __asm volatile
+            (
+                " mrs r0, control     \n" /* Obtain current control value. */
+                " movs r1, #1         \n" /* r1 = 1. */
+                " orrs r0, r1         \n" /* Set nPRIV bit. */
+                " msr control, r0     \n" /* Write back new control value. */
+                ::: "r0", "r1", "memory"
+            );
+        }
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+/* Return pdTRUE if the calling task has the portTASK_IS_PRIVILEGED_FLAG
+ * set in its MPU settings (i.e. it was created as a privileged task),
+ * pdFALSE otherwise.  Passing NULL to xTaskGetMPUSettings() queries the
+ * calling task's own settings. */
+    BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xTaskIsPrivileged = pdFALSE;
+        const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+        if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+        {
+            xTaskIsPrivileged = pdTRUE;
+        }
+
+        return xTaskIsPrivileged;
+    }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+/* MPU variant: initialise a new task's context.  The full register
+ * context is stored in the task's MPU settings (xMPUSettings->ulContext)
+ * rather than on the task stack; the returned pointer marks the end of
+ * the saved context.  CONTROL is pre-loaded according to whether the task
+ * runs privileged, and (for MPU wrappers v2) the dedicated system call
+ * stack is set up double-word aligned. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters,
+                                         BaseType_t xRunPrivileged,
+                                         xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulIndex = 0;
+
+        xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+        ulIndex++;
+
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+        ulIndex++;
+
+        #if ( configENABLE_TRUSTZONE == 1 )
+        {
+            xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+            ulIndex++;
+        }
+        #endif /* configENABLE_TRUSTZONE */
+        /* -8 leaves room for the 8-word hardware-saved exception frame. */
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+        ulIndex++;
+        if( xRunPrivileged == pdTRUE )
+        {
+            xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        else
+        {
+            xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+        ulIndex++;
+
+        #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+        {
+            /* Ensure that the system call stack is double word aligned. */
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+                                                                                    ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+                                                                                            ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+                                                                                          ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            /* This is not NULL only for the duration of a system call. */
+            xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+        }
+        #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+        return &( xMPUSettings->ulContext[ ulIndex ] );
+    }
+
+#else /* configENABLE_MPU */
+
+/* Non-MPU variant: simulate the stack frame as it would be created by a
+ * context switch interrupt, directly on the task stack, and return the
+ * resulting top-of-stack.  With portPRELOAD_REGISTERS the general purpose
+ * registers are seeded with recognisable values to aid debugging. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Simulate the stack frame as it would be created by a context switch
+         * interrupt. */
+        #if ( portPRELOAD_REGISTERS == 0 )
+        {
+            pxTopOfStack--;                                          /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack -= 5;                                       /* R12, R3, R2 and R1. */
+            *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0. */
+            pxTopOfStack -= 9;                                       /* R11..R4, EXC_RETURN. */
+            *pxTopOfStack = portINITIAL_EXC_RETURN;
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack;            /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT;               /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #else /* portPRELOAD_REGISTERS */
+        {
+            pxTopOfStack--;                                          /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x12121212UL;            /* R12. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x03030303UL;            /* R3. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x02020202UL;            /* R2. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x01010101UL;            /* R1. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x11111111UL;            /* R11. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x10101010UL;            /* R10. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x09090909UL;            /* R09. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x08080808UL;            /* R08. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x07070707UL;            /* R07. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x06060606UL;            /* R06. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x05050505UL;            /* R05. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x04040404UL;            /* R04. */
+            pxTopOfStack--;
+            *pxTopOfStack = portINITIAL_EXC_RETURN;                  /* EXC_RETURN. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack;            /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT;               /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #endif /* portPRELOAD_REGISTERS */
+
+        return pxTopOfStack;
+    }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Start the scheduler: verify the configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * configuration against the number of priority bits the hardware actually
+ * implements (asserts only), set PendSV/SysTick to the kernel priority,
+ * set up the MPU (if enabled) and the tick timer, then start the first
+ * task.  Never returns under normal operation. */
+BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+    {
+        volatile uint32_t ulOriginalPriority;
+        volatile uint32_t ulImplementedPrioBits = 0;
+        volatile uint8_t ucMaxPriorityValue;
+
+        /* Determine the maximum priority from which ISR safe FreeRTOS API
+         * functions can be called. ISR safe functions are those that end in
+         * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+         * ensure interrupt entry is as fast and simple as possible.
+         *
+         * Save the interrupt priority value that is about to be clobbered. */
+        ulOriginalPriority = portNVIC_SHPR2_REG;
+
+        /* Determine the number of priority bits available. First write to all
+         * possible bits. */
+        portNVIC_SHPR2_REG = 0xFF000000;
+
+        /* Read the value back to see how many bits stuck. */
+        ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+        /* Use the same mask on the maximum system call priority. */
+        ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+        /* Check that the maximum system call priority is nonzero after
+         * accounting for the number of priority bits supported by the
+         * hardware. A priority of 0 is invalid because setting the BASEPRI
+         * register to 0 unmasks all interrupts, and interrupts with priority 0
+         * cannot be masked using BASEPRI.
+         * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+        configASSERT( ucMaxSysCallPriority );
+
+        /* Check that the bits not implemented in hardware are zero in
+         * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+        configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+        /* Calculate the maximum acceptable priority group value for the number
+         * of bits read back. */
+
+        while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+        {
+            ulImplementedPrioBits++;
+            ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+        }
+
+        if( ulImplementedPrioBits == 8 )
+        {
+            /* When the hardware implements 8 priority bits, there is no way for
+             * the software to configure PRIGROUP to not have sub-priorities. As
+             * a result, the least significant bit is always used for sub-priority
+             * and there are 128 preemption priorities and 2 sub-priorities.
+             *
+             * This may cause some confusion in some cases - for example, if
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+             * priority interrupts will be masked in Critical Sections as those
+             * are at the same preemption priority. This may appear confusing as
+             * 4 is higher (numerically lower) priority than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+             * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+             * to 4, this confusion does not happen and the behaviour remains the same.
+             *
+             * The following assert ensures that the sub-priority bit in the
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+             * confusion. */
+            configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+            ulMaxPRIGROUPValue = 0;
+        }
+        else
+        {
+            ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+        }
+
+        /* Shift the priority group value back to its position within the AIRCR
+         * register. */
+        ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+        ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+        /* Restore the clobbered interrupt priority register to its original
+         * value. */
+        portNVIC_SHPR2_REG = ulOriginalPriority;
+    }
+    #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+    /* Make PendSV, CallSV and SysTick the same priority as the kernel. */
+    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
+    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
+
+    #if ( configENABLE_MPU == 1 )
+    {
+        /* Setup the Memory Protection Unit (MPU). */
+        prvSetupMPU();
+    }
+    #endif /* configENABLE_MPU */
+
+    /* Start the timer that generates the tick ISR. Interrupts are disabled
+     * here already. */
+    vPortSetupTimerInterrupt();
+
+    /* Initialize the critical nesting count ready for the first task. */
+    ulCriticalNesting = 0;
+
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+    {
+        xSchedulerRunning = pdTRUE;
+    }
+    #endif
+
+    /* Start the first task. */
+    vStartFirstTask();
+
+    /* Should never get here as the tasks will now be executing. Call the task
+     * exit error function to prevent compiler warnings about a static function
+     * not being called in the case that the application writer overrides this
+     * functionality by defining configTASK_RETURN_ADDRESS. Call
+     * vTaskSwitchContext() so link time optimization does not remove the
+     * symbol. */
+    vTaskSwitchContext();
+    prvTaskExitError();
+
+    /* Should not get here. */
+    return 0;
+}
+/*-----------------------------------------------------------*/
+
+/* The scheduler cannot be stopped on this port - there is nothing to
+ * return to.  The impossible assert (nesting count == 1000) is used to
+ * trap any call to this function when configASSERT is defined. */
+void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Not implemented in ports where there is nothing to return to.
+     * Artificially force an assert. */
+    configASSERT( ulCriticalNesting == 1000UL );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Translate the generic memory region definitions supplied in xRegions
+     * into ARMv8-M MPU RBAR/RLAR register values and store them in
+     * xMPUSettings. When valid stack parameters are supplied
+     * (ulStackDepth > 0), region 0 is additionally set up to grant the task
+     * read/write (never execute) access to its own stack. */
+    void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
+                                    const struct xMEMORY_REGION * const xRegions,
+                                    StackType_t * pxBottomOfStack,
+                                    uint32_t ulStackDepth )
+    {
+        uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber;
+        int32_t lIndex = 0;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_sram_start__;
+            extern uint32_t * __privileged_sram_end__;
+        #else
+            /* Declaration when these variables are exported from linker scripts. */
+            extern uint32_t __privileged_sram_start__[];
+            extern uint32_t __privileged_sram_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+
+        /* Setup MAIR0: attribute index 0 = normal memory, index 1 = device
+         * memory. The region RLAR values below select one of these two. */
+        xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+        xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+        /* This function is called automatically when the task is created - in
+         * which case the stack region parameters will be valid. At all other
+         * times the stack parameters will not be valid and it is assumed that
+         * the stack region has already been configured. */
+        if( ulStackDepth > 0 )
+        {
+            ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+            ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1;
+
+            /* If the stack is within the privileged SRAM, do not protect it
+             * using a separate MPU region. This is needed because privileged
+             * SRAM is already protected using an MPU region and ARMv8-M does
+             * not allow overlapping MPU regions. */
+            if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) &&
+                ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) )
+            {
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0;
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0;
+            }
+            else
+            {
+                /* Define the region that allows access to the stack. */
+                ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) |
+                                                             ( portMPU_REGION_NON_SHAREABLE ) |
+                                                             ( portMPU_REGION_READ_WRITE ) |
+                                                             ( portMPU_REGION_EXECUTE_NEVER );
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) |
+                                                             ( portMPU_RLAR_ATTR_INDEX0 ) |
+                                                             ( portMPU_RLAR_REGION_ENABLE );
+            }
+        }
+
+        /* User supplied configurable regions. Regions 1..portNUM_CONFIGURABLE_REGIONS
+         * in xMPUSettings are filled from xRegions[ 0.. ] in order. */
+        for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ )
+        {
+            /* If xRegions is NULL i.e. the task has not specified any MPU
+             * region, the else part ensures that all the configurable MPU
+             * regions are invalidated. */
+            if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) )
+            {
+                /* Translate the generic region definition contained in xRegions
+                 * into the ARMv8 specific MPU settings that are then stored in
+                 * xMPUSettings. */
+                ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                /* Start address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) |
+                                                                          ( portMPU_REGION_NON_SHAREABLE );
+
+                /* RO/RW. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY );
+                }
+                else
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE );
+                }
+
+                /* XN. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER );
+                }
+
+                /* End Address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) |
+                                                                          ( portMPU_RLAR_REGION_ENABLE );
+
+                /* Normal memory/ Device memory. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
+                {
+                    /* Attr1 in MAIR0 is configured as device memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1;
+                }
+                else
+                {
+                    /* Attr0 in MAIR0 is configured as normal memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0;
+                }
+            }
+            else
+            {
+                /* Invalidate the region. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL;
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL;
+            }
+
+            lIndex++;
+        }
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Return pdTRUE if the calling task is authorized to perform
+     * ulAccessRequested on the buffer [ pvBuffer, pvBuffer + ulBufferLength ).
+     * Privileged tasks are always authorized; unprivileged tasks are
+     * authorized only when the whole buffer lies within one of their enabled
+     * MPU regions with matching permissions. */
+    BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+                                                uint32_t ulBufferLength,
+                                                uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+    {
+        uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+        BaseType_t xAccessGranted = pdFALSE;
+        const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+        if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+        {
+            xAccessGranted = pdTRUE;
+        }
+        else
+        {
+            /* Only proceed when computing the buffer end address cannot wrap
+             * around the 32-bit address space - otherwise deny access. */
+            if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+            {
+                ulBufferStartAddress = ( uint32_t ) pvBuffer;
+                ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+                for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+                {
+                    /* Is the MPU region enabled? */
+                    if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+                    {
+                        /* Both ends of the buffer must fall inside this one
+                         * region and the region's permissions must cover the
+                         * requested access. */
+                        if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+                            portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+                            portIS_AUTHORIZED( ulAccessRequested,
+                                               prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+                        {
+                            xAccessGranted = pdTRUE;
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+
+        return xAccessGranted;
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Return pdTRUE when called from an exception/interrupt handler and pdFALSE
+ * when called from thread (task) context. */
+BaseType_t xPortIsInsideInterrupt( void )
+{
+    uint32_t ulIPSR;
+
+    /* The Interrupt Program Status Register (IPSR) holds the exception number
+     * of the currently executing exception, or zero in Thread mode. */
+    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR )::"memory" );
+
+    /* A non-zero exception number means an interrupt is being handled. */
+    return ( ulIPSR == 0 ) ? pdFALSE : pdTRUE;
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+    /* Assert that the currently executing interrupt's priority permits it to
+     * call ISR-safe FreeRTOS API functions, and that the NVIC priority
+     * grouping assigns all bits as pre-emption priority bits. */
+    void vPortValidateInterruptPriority( void )
+    {
+        uint32_t ulCurrentInterrupt;
+        uint8_t ucCurrentPriority;
+
+        /* Obtain the number of the currently executing interrupt. */
+        __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+        /* Is the interrupt number a user defined interrupt? */
+        if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+        {
+            /* Look up the interrupt's priority. */
+            ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+            /* The following assertion will fail if a service routine (ISR) for
+             * an interrupt that has been assigned a priority above
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+             * function. ISR safe FreeRTOS API functions must *only* be called
+             * from interrupts that have been assigned a priority at or below
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Numerically low interrupt priority numbers represent logically high
+             * interrupt priorities, therefore the priority of the interrupt must
+             * be set to a value equal to or numerically *higher* than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Interrupts that use the FreeRTOS API must not be left at their
+             * default priority of zero as that is the highest possible priority,
+             * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+             * and therefore also guaranteed to be invalid.
+             *
+             * FreeRTOS maintains separate thread and ISR API functions to ensure
+             * interrupt entry is as fast and simple as possible.
+             *
+             * The following links provide detailed information:
+             * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+             * https://www.FreeRTOS.org/FAQHelp.html */
+            configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+        }
+
+        /* Priority grouping: The interrupt controller (NVIC) allows the bits
+         * that define each interrupt's priority to be split between bits that
+         * define the interrupt's pre-emption priority bits and bits that define
+         * the interrupt's sub-priority. For simplicity all bits must be defined
+         * to be pre-emption priority bits. The following assertion will fail if
+         * this is not the case (if some bits represent a sub-priority).
+         *
+         * If the application only uses CMSIS libraries for interrupt
+         * configuration then the correct setting can be achieved on all Cortex-M
+         * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+         * scheduler. Note however that some vendor specific peripheral libraries
+         * assume a non-zero priority group setting, in which cases using a value
+         * of zero will result in unpredictable behaviour. */
+        configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+    }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                         int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Grant xInternalTaskHandle access to the kernel object identified by
+         * lInternalIndexOfKernelObject by setting the corresponding bit in
+         * the task's access control list. */
+        xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+        const uint32_t ulAclIndex = ( ( uint32_t ) lInternalIndexOfKernelObject ) / portACL_ENTRY_SIZE_BITS;
+        const uint32_t ulAclBit = ( ( uint32_t ) lInternalIndexOfKernelObject ) % portACL_ENTRY_SIZE_BITS;
+
+        pxMpuSettings->ulAccessControlList[ ulAclIndex ] |= ( 1U << ulAclBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                          int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Revoke xInternalTaskHandle's access to the kernel object identified
+         * by lInternalIndexOfKernelObject by clearing the corresponding bit
+         * in the task's access control list. */
+        xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+        const uint32_t ulAclIndex = ( ( uint32_t ) lInternalIndexOfKernelObject ) / portACL_ENTRY_SIZE_BITS;
+        const uint32_t ulAclBit = ( ( uint32_t ) lInternalIndexOfKernelObject ) % portACL_ENTRY_SIZE_BITS;
+
+        pxMpuSettings->ulAccessControlList[ ulAclIndex ] &= ~( 1U << ulAclBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xAccessGranted;
+
+            if( xSchedulerRunning == pdFALSE )
+            {
+                /* Before the scheduler starts there is no running task whose
+                 * permissions could be consulted, so access to all kernel
+                 * objects is granted. */
+                xAccessGranted = pdTRUE;
+            }
+            else
+            {
+                const xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+                if( ( pxMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+                {
+                    /* Privileged tasks may access every kernel object. */
+                    xAccessGranted = pdTRUE;
+                }
+                else
+                {
+                    /* Unprivileged tasks may only access kernel objects whose
+                     * bit is set in their access control list. */
+                    const uint32_t ulAclIndex = ( ( uint32_t ) lInternalIndexOfKernelObject ) / portACL_ENTRY_SIZE_BITS;
+                    const uint32_t ulAclBit = ( ( uint32_t ) lInternalIndexOfKernelObject ) % portACL_ENTRY_SIZE_BITS;
+
+                    if( ( pxMpuSettings->ulAccessControlList[ ulAclIndex ] & ( 1U << ulAclBit ) ) != 0 )
+                    {
+                        xAccessGranted = pdTRUE;
+                    }
+                    else
+                    {
+                        xAccessGranted = pdFALSE;
+                    }
+                }
+            }
+
+            return xAccessGranted;
+        }
+
+    #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            /* Without the Access Control List feature every task has access
+             * to every kernel object. */
+            ( void ) lInternalIndexOfKernelObject;
+
+            return pdTRUE;
+        }
+
+    #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM55/non_secure/portasm.c b/Source/portable/GCC/ARM_CM55/non_secure/portasm.c
new file mode 100644
index 0000000..7431c98
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/non_secure/portasm.c
@@ -0,0 +1,608 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION
+ * is defined correctly and privileged functions are placed in correct sections. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Portasm includes. */
+#include "portasm.h"
+
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
+
+/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
+ * header files. */
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Restore the context of the first task to run and branch to it. The MPU
+     * is first re-programmed from the values stored in the task's TCB, then
+     * the saved special registers and general registers are restored. */
+    void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+    {
+        __asm volatile
+        (
+            " .syntax unified                                 \n"
+            "                                                 \n"
+            " program_mpu_first_task:                         \n"
+            " ldr r3, pxCurrentTCBConst2                      \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+            " ldr r0, [r3]                                    \n" /* r0 = pxCurrentTCB. */
+            "                                                 \n"
+            " dmb                                             \n" /* Complete outstanding transfers before disabling MPU. */
+            " ldr r1, xMPUCTRLConst2                          \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+            " ldr r2, [r1]                                    \n" /* Read the value of MPU_CTRL. */
+            " bic r2, #1                                      \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+            " str r2, [r1]                                    \n" /* Disable MPU. */
+            "                                                 \n"
+            " adds r0, #4                                     \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+            " ldr r1, [r0]                                    \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+            " ldr r2, xMAIR0Const2                            \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+            " str r1, [r2]                                    \n" /* Program MAIR0. */
+            "                                                 \n"
+            " adds r0, #4                                     \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+            " ldr r1, xRNRConst2                              \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+            " ldr r2, xRBARConst2                             \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+            "                                                 \n"
+            " movs r3, #4                                     \n" /* r3 = 4. */
+            " str r3, [r1]                                    \n" /* Program RNR = 4. */
+            " ldmia r0!, {r4-r11}                             \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
+            " stmia r2, {r4-r11}                              \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+            "                                                 \n"
+            #if ( configTOTAL_MPU_REGIONS == 16 )
+                " movs r3, #8                                 \n" /* r3 = 8. */
+                " str r3, [r1]                                \n" /* Program RNR = 8. */
+                " ldmia r0!, {r4-r11}                         \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
+                " stmia r2, {r4-r11}                          \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+                " movs r3, #12                                \n" /* r3 = 12. */
+                " str r3, [r1]                                \n" /* Program RNR = 12. */
+                " ldmia r0!, {r4-r11}                         \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
+                " stmia r2, {r4-r11}                          \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+            #endif /* configTOTAL_MPU_REGIONS == 16 */
+            "                                                 \n"
+            " ldr r1, xMPUCTRLConst2                          \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+            " ldr r2, [r1]                                    \n" /* Read the value of MPU_CTRL. */
+            " orr r2, #1                                      \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */
+            " str r2, [r1]                                    \n" /* Enable MPU. */
+            " dsb                                             \n" /* Force memory writes before continuing. */
+            "                                                 \n"
+            " restore_context_first_task:                     \n"
+            " ldr r3, pxCurrentTCBConst2                      \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+            " ldr r1, [r3]                                    \n" /* r1 = pxCurrentTCB.*/
+            " ldr r2, [r1]                                    \n" /* r2 = Location of saved context in TCB. */
+            "                                                 \n"
+            " restore_special_regs_first_task:                \n"
+            " ldmdb r2!, {r0, r3-r5, lr}                      \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+            " msr psp, r3                                     \n"
+            " msr psplim, r4                                  \n"
+            " msr control, r5                                 \n"
+            " ldr r4, xSecureContextConst2                    \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+            " str r0, [r4]                                    \n" /* Restore xSecureContext. */
+            "                                                 \n"
+            " restore_general_regs_first_task:                \n"
+            " ldmdb r2!, {r4-r11}                             \n" /* r4-r11 contain hardware saved context. */
+            " stmia r3!, {r4-r11}                             \n" /* Copy the hardware saved context on the task stack. */
+            " ldmdb r2!, {r4-r11}                             \n" /* r4-r11 restored. */
+            "                                                 \n"
+            " restore_context_done_first_task:                \n"
+            " str r2, [r1]                                    \n" /* Save the location where the context should be saved next as the first member of TCB. */
+            " mov r0, #0                                      \n"
+            " msr basepri, r0                                 \n" /* Ensure that interrupts are enabled when the first task starts. */
+            " bx lr                                           \n"
+            "                                                 \n"
+            " .align 4                                        \n"
+            " pxCurrentTCBConst2: .word pxCurrentTCB          \n"
+            " xSecureContextConst2: .word xSecureContext      \n"
+            " xMPUCTRLConst2: .word 0xe000ed94                \n"
+            " xMAIR0Const2: .word 0xe000edc0                  \n"
+            " xRNRConst2: .word 0xe000ed98                    \n"
+            " xRBARConst2: .word 0xe000ed9c                   \n"
+        );
+    }
+
+#else /* configENABLE_MPU */
+
+    /* Restore the context of the first task to run: set xSecureContext,
+     * PSPLIM and PSP from the task's stack, switch Thread mode to use PSP,
+     * then branch to the task's EXC_RETURN value. */
+    void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+    {
+        __asm volatile
+        (
+            " .syntax unified                                 \n"
+            "                                                 \n"
+            " ldr r2, pxCurrentTCBConst2                      \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+            " ldr r3, [r2]                                    \n" /* Read pxCurrentTCB. */
+            " ldr r0, [r3]                                    \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+            "                                                 \n"
+            " ldm r0!, {r1-r3}                                \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+            " ldr r4, xSecureContextConst2                    \n"
+            " str r1, [r4]                                    \n" /* Set xSecureContext to this task's value for the same. */
+            " msr psplim, r2                                  \n" /* Set this task's PSPLIM value. */
+            " movs r1, #2                                     \n" /* r1 = 2. */
+            " msr CONTROL, r1                                 \n" /* Switch to use PSP in the thread mode. */
+            " adds r0, #32                                    \n" /* Discard everything up to r0. */
+            " msr psp, r0                                     \n" /* This is now the new top of stack to use in the task. */
+            " isb                                             \n"
+            " mov r0, #0                                      \n"
+            " msr basepri, r0                                 \n" /* Ensure that interrupts are enabled when the first task starts. */
+            " bx r3                                           \n" /* Finally, branch to EXC_RETURN. */
+            " .align 4                                        \n"
+            "pxCurrentTCBConst2: .word pxCurrentTCB           \n"
+            "xSecureContextConst2: .word xSecureContext       \n"
+        );
+    }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Return 1 when the processor is running privileged (bit 0 of the CONTROL
+ * register is clear) and 0 otherwise. The result is placed in r0 as this is
+ * effectively a naked assembly function. */
+BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        "                                                 \n"
+        " mrs r0, control                                 \n" /* r0 = CONTROL. */
+        " tst r0, #1                                      \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+        " ite ne                                          \n"
+        " movne r0, #0                                    \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+        " moveq r0, #1                                    \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+        " bx lr                                           \n" /* Return. */
+        "                                                 \n"
+        " .align 4                                        \n"
+        ::: "r0", "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+/* Raise privilege by clearing bit 0 of the CONTROL register so the processor
+ * runs privileged in Thread mode. */
+void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        "                                                 \n"
+        " mrs r0, control                                 \n" /* Read the CONTROL register. */
+        " bic r0, #1                                      \n" /* Clear the bit 0. */
+        " msr control, r0                                 \n" /* Write back the new CONTROL value. */
+        " bx lr                                           \n" /* Return to the caller. */
+        ::: "r0", "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+/* Drop privilege by setting bit 0 of the CONTROL register so the processor
+ * runs unprivileged in Thread mode. */
+void vResetPrivilege( void ) /* __attribute__ (( naked )) */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        "                                                 \n"
+        " mrs r0, control                                 \n" /* r0 = CONTROL. */
+        " orr r0, #1                                      \n" /* r0 = r0 | 1. */
+        " msr control, r0                                 \n" /* CONTROL = r0. */
+        " bx lr                                           \n" /* Return to the caller. */
+        ::: "r0", "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+/* Start the first task: reset the main stack pointer (MSP) to the top of the
+ * main stack read from the vector table, enable interrupts and raise an SVC
+ * with number portSVC_START_SCHEDULER - the SVC handler then starts the
+ * first task. */
+void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        "                                                 \n"
+        " ldr r0, xVTORConst                              \n" /* Use the NVIC offset register to locate the stack. */
+        " ldr r0, [r0]                                    \n" /* Read the VTOR register which gives the address of vector table. */
+        " ldr r0, [r0]                                    \n" /* The first entry in vector table is stack pointer. */
+        " msr msp, r0                                     \n" /* Set the MSP back to the start of the stack. */
+        " cpsie i                                         \n" /* Globally enable interrupts. */
+        " cpsie f                                         \n"
+        " dsb                                             \n"
+        " isb                                             \n"
+        " svc %0                                          \n" /* System call to start the first task. */
+        " nop                                             \n"
+        "                                                 \n"
+        " .align 4                                        \n"
+        "xVTORConst: .word 0xe000ed08                     \n"
+        ::"i" ( portSVC_START_SCHEDULER ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+/* Mask interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY by writing
+ * BASEPRI, and return (in r0) the previous BASEPRI value so the caller can
+ * later restore it with vClearInterruptMask(). */
+uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        "                                                 \n"
+        " mrs r0, basepri                                 \n" /* r0 = basepri. Return original basepri value. */
+        " mov r1, %0                                      \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+        " msr basepri, r1                                 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+        " dsb                                             \n"
+        " isb                                             \n"
+        " bx lr                                           \n" /* Return. */
+        ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+/* Restore BASEPRI to ulMask (the first argument, passed in r0), re-enabling
+ * the interrupts that were masked by a previous call to
+ * ulSetInterruptMask(). */
+void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        "                                                 \n"
+        " msr basepri, r0                                 \n" /* basepri = ulMask. */
+        " dsb                                             \n"
+        " isb                                             \n"
+        " bx lr                                           \n" /* Return. */
+        ::: "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* PendSV is the context-switch exception: save the current task's
+     * (secure and non-secure) context into its TCB, select the next task via
+     * vTaskSwitchContext(), re-program the MPU for the new task, then restore
+     * the new task's context. */
+    void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+    {
+        __asm volatile
+        (
+            " .syntax unified                                 \n"
+            " .extern SecureContext_SaveContext               \n"
+            " .extern SecureContext_LoadContext               \n"
+            "                                                 \n"
+            " ldr r3, xSecureContextConst                     \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+            " ldr r0, [r3]                                    \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+            " ldr r3, pxCurrentTCBConst                       \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+            " ldr r1, [r3]                                    \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+            " ldr r2, [r1]                                    \n" /* r2 = Location in TCB where the context should be saved. */
+            "                                                 \n"
+            " cbz r0, save_ns_context                         \n" /* No secure context to save. */
+            " save_s_context:                                 \n"
+            " push {r0-r2, lr}                                \n"
+            " bl SecureContext_SaveContext                    \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+            " pop {r0-r2, lr}                                 \n"
+            "                                                 \n"
+            " save_ns_context:                                \n"
+            " mov r3, lr                                      \n" /* r3 = LR (EXC_RETURN). */
+            " lsls r3, r3, #25                                \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+            " bmi save_special_regs                           \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+            "                                                 \n"
+            " save_general_regs:                              \n"
+            " mrs r3, psp                                     \n"
+            "                                                 \n"
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+                " add r3, r3, #0x20                           \n" /* Move r3 to location where s0 is saved. */
+                " tst lr, #0x10                               \n"
+                " ittt eq                                     \n"
+                " vstmiaeq r2!, {s16-s31}                     \n" /* Store s16-s31. */
+                " vldmiaeq r3, {s0-s16}                       \n" /* Copy hardware saved FP context into s0-s16. */
+                " vstmiaeq r2!, {s0-s16}                      \n" /* Store hardware saved FP context. */
+                " sub r3, r3, #0x20                           \n" /* Set r3 back to the location of hardware saved context. */
+            #endif /* configENABLE_FPU || configENABLE_MVE */
+            "                                                 \n"
+            " stmia r2!, {r4-r11}                             \n" /* Store r4-r11. */
+            " ldmia r3, {r4-r11}                              \n" /* Copy the hardware saved context into r4-r11. */
+            " stmia r2!, {r4-r11}                             \n" /* Store the hardware saved context. */
+            "                                                 \n"
+            " save_special_regs:                              \n"
+            " mrs r3, psp                                     \n" /* r3 = PSP. */
+            " mrs r4, psplim                                  \n" /* r4 = PSPLIM. */
+            " mrs r5, control                                 \n" /* r5 = CONTROL. */
+            " stmia r2!, {r0, r3-r5, lr}                      \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+            " str r2, [r1]                                    \n" /* Save the location from where the context should be restored as the first member of TCB. */
+            "                                                 \n"
+            " select_next_task:                               \n"
+            " mov r0, %0                                      \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+            " msr basepri, r0                                 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+            " dsb                                             \n"
+            " isb                                             \n"
+            " bl vTaskSwitchContext                           \n"
+            " mov r0, #0                                      \n" /* r0 = 0. */
+            " msr basepri, r0                                 \n" /* Enable interrupts. */
+            "                                                 \n"
+            " program_mpu:                                    \n"
+            " ldr r3, pxCurrentTCBConst                       \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+            " ldr r0, [r3]                                    \n" /* r0 = pxCurrentTCB.*/
+            "                                                 \n"
+            " dmb                                             \n" /* Complete outstanding transfers before disabling MPU. */
+            " ldr r1, xMPUCTRLConst                           \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+            " ldr r2, [r1]                                    \n" /* Read the value of MPU_CTRL. */
+            " bic r2, #1                                      \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+            " str r2, [r1]                                    \n" /* Disable MPU. */
+            "                                                 \n"
+            " adds r0, #4                                     \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+            " ldr r1, [r0]                                    \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+            " ldr r2, xMAIR0Const                             \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+            " str r1, [r2]                                    \n" /* Program MAIR0. */
+            "                                                 \n"
+            " adds r0, #4                                     \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+            " ldr r1, xRNRConst                               \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+            " ldr r2, xRBARConst                              \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+            "                                                 \n"
+            " movs r3, #4                                     \n" /* r3 = 4. */
+            " str r3, [r1]                                    \n" /* Program RNR = 4. */
+            " ldmia r0!, {r4-r11}                             \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+            " stmia r2, {r4-r11}                              \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+            "                                                 \n"
+            #if ( configTOTAL_MPU_REGIONS == 16 )
+                " movs r3, #8                                 \n" /* r3 = 8. */
+                " str r3, [r1]                                \n" /* Program RNR = 8. */
+                " ldmia r0!, {r4-r11}                         \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+                " stmia r2, {r4-r11}                          \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+                " movs r3, #12                                \n" /* r3 = 12. */
+                " str r3, [r1]                                \n" /* Program RNR = 12. */
+                " ldmia r0!, {r4-r11}                         \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+                " stmia r2, {r4-r11}                          \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+            #endif /* configTOTAL_MPU_REGIONS == 16 */
+            "                                                 \n"
+            " ldr r1, xMPUCTRLConst                           \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+            " ldr r2, [r1]                                    \n" /* Read the value of MPU_CTRL. */
+            " orr r2, #1                                      \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+            " str r2, [r1]                                    \n" /* Enable MPU. */
+            " dsb                                             \n" /* Force memory writes before continuing. */
+            "                                                 \n"
+            " restore_context:                                \n"
+            " ldr r3, pxCurrentTCBConst                       \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+            " ldr r1, [r3]                                    \n" /* r1 = pxCurrentTCB.*/
+            " ldr r2, [r1]                                    \n" /* r2 = Location of saved context in TCB. */
+            "                                                 \n"
+            " restore_special_regs:                           \n"
+            " ldmdb r2!, {r0, r3-r5, lr}                      \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+            " msr psp, r3                                     \n"
+            " msr psplim, r4                                  \n"
+            " msr control, r5                                 \n"
+            " ldr r4, xSecureContextConst                     \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+            " str r0, [r4]                                    \n" /* Restore xSecureContext. */
+            " cbz r0, restore_ns_context                      \n" /* No secure context to restore. */
+            "                                                 \n"
+            " restore_s_context:                              \n"
+            " push {r1-r3, lr}                                \n"
+            " bl SecureContext_LoadContext                    \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+            " pop {r1-r3, lr}                                 \n"
+            "                                                 \n"
+            " restore_ns_context:                             \n"
+            " mov r0, lr                                      \n" /* r0 = LR (EXC_RETURN). */
+            " lsls r0, r0, #25                                \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+            " bmi restore_context_done                        \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+            "                                                 \n"
+            " restore_general_regs:                           \n"
+            " ldmdb r2!, {r4-r11}                             \n" /* r4-r11 contain hardware saved context. */
+            " stmia r3!, {r4-r11}                             \n" /* Copy the hardware saved context on the task stack. */
+            " ldmdb r2!, {r4-r11}                             \n" /* r4-r11 restored. */
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+                " tst lr, #0x10                               \n"
+                " ittt eq                                     \n"
+                " vldmdbeq r2!, {s0-s16}                      \n" /* s0-s16 contain hardware saved FP context. */
+                " vstmiaeq r3!, {s0-s16}                      \n" /* Copy hardware saved FP context on the task stack. */
+                " vldmdbeq r2!, {s16-s31}                     \n" /* Restore s16-s31. */
+            #endif /* configENABLE_FPU || configENABLE_MVE */
+            "                                                 \n"
+            " restore_context_done:                           \n"
+            " str r2, [r1]                                    \n" /* Save the location where the context should be saved next as the first member of TCB. */
+            " bx lr                                           \n"
+            "                                                 \n"
+            " .align 4                                        \n"
+            " pxCurrentTCBConst: .word pxCurrentTCB           \n"
+            " xSecureContextConst: .word xSecureContext       \n"
+            " xMPUCTRLConst: .word 0xe000ed94                 \n"
+            " xMAIR0Const: .word 0xe000edc0                   \n"
+            " xRNRConst: .word 0xe000ed98                     \n"
+            " xRBARConst: .word 0xe000ed9c                    \n"
+            ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+        );
+    }
+
+
+#else /* configENABLE_MPU */
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ " mrs r2, psp \n" /* Read PSP in r2. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " push {r0-r2, r14} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r3} \n" /* LR is now in r3. */
+ " mov lr, r3 \n" /* LR = r3. */
+ " lsls r1, r3, #25 \n" /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl save_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB.*/
+ " subs r2, r2, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ " str r2, [r1] \n" /* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n" /* r1 = PSPLIM. */
+ " mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
+ " b select_next_task \n"
+ " \n"
+ " save_ns_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " it eq \n"
+ " vstmdbeq r2!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " subs r2, r2, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ " str r2, [r1] \n" /* Save the new top of stack in TCB. */
+ " adds r2, r2, #12 \n" /* r2 = r2 + 12. */
+ " stm r2, {r4-r11} \n" /* Store the registers that are not saved automatically. */
+ " mrs r1, psplim \n" /* r1 = PSPLIM. */
+ " mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
+ " subs r2, r2, #12 \n" /* r2 = r2 - 12. */
+ " stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+        "   msr basepri, r0                              \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ " ldr r2, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
+ " \n"
+ " ldmia r2!, {r0, r1, r4} \n" /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ " msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
+ " mov lr, r4 \n" /* LR = r4. */
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r3] \n" /* Restore the task's xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ " push {r2, r4} \n"
+ " bl SecureContext_LoadContext \n" /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r2, r4} \n"
+ " mov lr, r4 \n" /* LR = r4. */
+ " lsls r1, r4, #25 \n" /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " msr psp, r2 \n" /* Remember the new top of stack for the task. */
+ " bx lr \n"
+ " \n"
+ " restore_ns_context: \n"
+ " ldmia r2!, {r4-r11} \n" /* Restore the registers that are not automatically restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " it eq \n"
+ " vldmiaeq r2!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " msr psp, r2 \n" /* Remember the new top of stack for the task. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ "xSecureContextConst: .word xSecureContext \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+ }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "blt syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ : "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+ }
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " tst lr, #4 \n"
+ " ite eq \n"
+ " mrseq r0, msp \n"
+ " mrsne r0, psp \n"
+ " ldr r1, svchandler_address_const \n"
+ " bx r1 \n"
+ " \n"
+ " .align 4 \n"
+ "svchandler_address_const: .word vPortSVCHandler_C \n"
+ );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " svc %0 \n" /* Secure context is allocated in the supervisor call. */
+ " bx lr \n" /* Return. */
+ ::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, [r0] \n" /* The first item in the TCB is the top of the stack. */
+ " ldr r1, [r2] \n" /* The first item on the stack is the task's xSecureContext. */
+ " cmp r1, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
+ " it ne \n"
+ " svcne %0 \n" /* Secure context is freed in the supervisor call. */
+ " bx lr \n" /* Return. */
+ ::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM55/non_secure/portasm.h b/Source/portable/GCC/ARM_CM55/non_secure/portasm.h
new file mode 100644
index 0000000..f64ceb5
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/non_secure/portasm.h
@@ -0,0 +1,114 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __PORT_ASM_H__
+#define __PORT_ASM_H__
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+
+/* MPU wrappers includes. */
+#include "mpu_wrappers.h"
+
+/**
+ * @brief Restore the context of the first task so that the first task starts
+ * executing.
+ */
+void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Raises the privilege level by clearing the bit 0 of the CONTROL
+ * register.
+ *
+ * @note This is a privileged function and should only be called from the kernel
+ * code.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vResetPrivilege( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Starts the first task.
+ */
+void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Disables interrupts.
+ */
+uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enables interrupts.
+ */
+void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief PendSV Exception handler.
+ */
+void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SVC Handler.
+ */
+void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Allocate a Secure context for the calling task.
+ *
+ * @param[in] ulSecureStackSize The size of the stack to be allocated on the
+ * secure side for the calling task.
+ */
+void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) );
+
+/**
+ * @brief Free the task's secure context.
+ *
+ * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task.
+ */
+void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* __PORT_ASM_H__ */
diff --git a/Source/portable/GCC/ARM_CM55/non_secure/portmacro.h b/Source/portable/GCC/ARM_CM55/non_secure/portmacro.h
new file mode 100644
index 0000000..880205c
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/non_secure/portmacro.h
@@ -0,0 +1,78 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_MVE
+ #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE.
+#endif /* configENABLE_MVE */
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portARCH_NAME "Cortex-M55"
+#define portHAS_BASEPRI 1
+#define portDONT_DISCARD __attribute__( ( used ) )
+/*-----------------------------------------------------------*/
+
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portDISABLE_INTERRUPTS() ulSetInterruptMask()
+#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 )
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h b/Source/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h
new file mode 100644
index 0000000..6f666da
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h
@@ -0,0 +1,449 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
+
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
+
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
+
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Type definitions.
+ */
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
+
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
+
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+
+/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
+ * not need to be guarded with a critical section. */
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Extern declarations.
+ */
+extern BaseType_t xPortIsInsideInterrupt( void );
+
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
+
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief MPU specific constants.
+ */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
+
+/* MPU settings that can be overridden in FreeRTOSConfig.h. */
+#ifndef configTOTAL_MPU_REGIONS
+ /* Define to 8 for backward compatibility. */
+ #define configTOTAL_MPU_REGIONS ( 8UL )
+#endif
+
+/* MPU regions. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+
+/* Device memory attributes used in MPU_MAIR registers.
+ *
+ * 8-bit values encoded as follows:
+ * Bit[7:4] - 0000 - Device Memory
+ * Bit[3:2] - 00 --> Device-nGnRnE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
+ * Bit[1:0] - 00, Reserved.
+ */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+
+/* Normal memory attributes used in MPU_MAIR registers. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+
+/* Attributes used in MPU_RBAR registers. */
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
+ * @brief SVC numbers.
+ */
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Scheduler utilities.
+ */
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Tickless idle/low power functionality.
+ */
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Task function macros as described on the FreeRTOS.org WEB site.
+ */
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Allocate a secure context for the task.
+ *
+ * Tasks are not created with a secure context. Any task that is going to call
+ * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
+ * secure context before it calls any secure function.
+ *
+ * @param[in] ulSecureStackSize The size of the secure stack to be allocated.
+ */
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+
+/**
+ * @brief Called when a task is deleted to delete the task's secure context,
+ * if it has one.
+ *
+ * @param[in] pxTCB The TCB of the task being deleted.
+ */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+ #define portIS_PRIVILEGED() xIsPrivileged()
+
+/**
+ * @brief Raise an SVC request to raise privilege.
+ *
+ * The SVC handler checks that the SVC was raised from a system call and only
+ * then it raises the privilege. If this is called from any other place,
+ * the privilege is not raised.
+ */
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Barriers.
+ */
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/GCC/ARM_CM55/secure/secure_context.c b/Source/portable/GCC/ARM_CM55/secure/secure_context.c
new file mode 100644
index 0000000..e37dd96
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/secure/secure_context.c
@@ -0,0 +1,351 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Secure context includes. */
+#include "secure_context.h"
+
+/* Secure heap includes. */
+#include "secure_heap.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief CONTROL value for privileged tasks.
+ *
+ * Bit[0] - 0 --> Thread mode is privileged.
+ * Bit[1] - 1 --> Thread mode uses PSP.
+ */
+#define securecontextCONTROL_VALUE_PRIVILEGED 0x02
+
+/**
+ * @brief CONTROL value for un-privileged tasks.
+ *
+ * Bit[0] - 1 --> Thread mode is un-privileged.
+ * Bit[1] - 1 --> Thread mode uses PSP.
+ */
+#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03
+
+/**
+ * @brief Size of stack seal values in bytes.
+ */
+#define securecontextSTACK_SEAL_SIZE 8
+
+/**
+ * @brief Stack seal value as recommended by ARM.
+ */
+#define securecontextSTACK_SEAL_VALUE 0xFEF5EDA5
+
+/**
+ * @brief Maximum number of secure contexts.
+ */
+#ifndef secureconfigMAX_SECURE_CONTEXTS
+ #define secureconfigMAX_SECURE_CONTEXTS 8UL
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Pre-allocated array of secure contexts.
+ */
+SecureContext_t xSecureContexts[ secureconfigMAX_SECURE_CONTEXTS ];
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Get a free secure context for a task from the secure context pool (xSecureContexts).
+ *
+ * This function ensures that only one secure context is allocated for a task.
+ *
+ * @param[in] pvTaskHandle The task handle for which the secure context is allocated.
+ *
+ * @return Index of a free secure context in the xSecureContexts array.
+ */
+static uint32_t ulGetSecureContext( void * pvTaskHandle );
+
+/**
+ * @brief Return the secure context to the secure context pool (xSecureContexts).
+ *
+ * @param[in] ulSecureContextIndex Index of the context in the xSecureContexts array.
+ */
+static void vReturnSecureContext( uint32_t ulSecureContextIndex );
+
+/* These are implemented in assembly. */
+extern void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext );
+extern void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext );
+/*-----------------------------------------------------------*/
+
+static uint32_t ulGetSecureContext( void * pvTaskHandle )
+{
+ /* Start with invalid index. */
+ uint32_t i, ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS;
+
+ for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ )
+ {
+ if( ( xSecureContexts[ i ].pucCurrentStackPointer == NULL ) &&
+ ( xSecureContexts[ i ].pucStackLimit == NULL ) &&
+ ( xSecureContexts[ i ].pucStackStart == NULL ) &&
+ ( xSecureContexts[ i ].pvTaskHandle == NULL ) &&
+ ( ulSecureContextIndex == secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = i;
+ }
+ else if( xSecureContexts[ i ].pvTaskHandle == pvTaskHandle )
+ {
+ /* A task can only have one secure context. Do not allocate a second
+ * context for the same task. */
+ ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS;
+ break;
+ }
+ }
+
+ return ulSecureContextIndex;
+}
+/*-----------------------------------------------------------*/
+
+static void vReturnSecureContext( uint32_t ulSecureContextIndex )
+{
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = NULL;
+ xSecureContexts[ ulSecureContextIndex ].pucStackLimit = NULL;
+ xSecureContexts[ ulSecureContextIndex ].pucStackStart = NULL;
+ xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = NULL;
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_Init( void )
+{
+ uint32_t ulIPSR, i;
+ static uint32_t ulSecureContextsInitialized = 0;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ( ulIPSR != 0 ) && ( ulSecureContextsInitialized == 0 ) )
+ {
+ /* Ensure to initialize secure contexts only once. */
+ ulSecureContextsInitialized = 1;
+
+ /* No stack for thread mode until a task's context is loaded. */
+ secureportSET_PSPLIM( securecontextNO_STACK );
+ secureportSET_PSP( securecontextNO_STACK );
+
+ /* Initialize all secure contexts. */
+ for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ )
+ {
+ xSecureContexts[ i ].pucCurrentStackPointer = NULL;
+ xSecureContexts[ i ].pucStackLimit = NULL;
+ xSecureContexts[ i ].pucStackStart = NULL;
+ xSecureContexts[ i ].pvTaskHandle = NULL;
+ }
+
+ #if ( configENABLE_MPU == 1 )
+ {
+ /* Configure thread mode to use PSP and to be unprivileged. */
+ secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED );
+ }
+ #else /* configENABLE_MPU */
+ {
+ /* Configure thread mode to use PSP and to be privileged. */
+ secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED );
+ }
+ #endif /* configENABLE_MPU */
+ }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+ secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ uint32_t ulIsTaskPrivileged,
+ void * pvTaskHandle )
+#else /* configENABLE_MPU */
+ secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ void * pvTaskHandle )
+#endif /* configENABLE_MPU */
+{
+ uint8_t * pucStackMemory = NULL;
+ uint8_t * pucStackLimit;
+ uint32_t ulIPSR, ulSecureContextIndex;
+ SecureContextHandle_t xSecureContextHandle = securecontextINVALID_CONTEXT_ID;
+
+ #if ( configENABLE_MPU == 1 )
+ uint32_t * pulCurrentStackPointer = NULL;
+ #endif /* configENABLE_MPU */
+
+ /* Read the Interrupt Program Status Register (IPSR) and Process Stack Limit
+ * Register (PSPLIM) value. */
+ secureportREAD_IPSR( ulIPSR );
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode.
+ * Also do nothing, if a secure context is already loaded. PSPLIM is set to
+ * securecontextNO_STACK when no secure context is loaded. */
+ if( ( ulIPSR != 0 ) && ( pucStackLimit == securecontextNO_STACK ) )
+ {
+ /* Obtain a free secure context. */
+ ulSecureContextIndex = ulGetSecureContext( pvTaskHandle );
+
+ /* Were we able to get a free context? */
+ if( ulSecureContextIndex < secureconfigMAX_SECURE_CONTEXTS )
+ {
+ /* Allocate the stack space. */
+ pucStackMemory = pvPortMalloc( ulSecureStackSize + securecontextSTACK_SEAL_SIZE );
+
+ if( pucStackMemory != NULL )
+ {
+ /* Since stack grows down, the starting point will be the last
+ * location. Note that this location is next to the last
+ * allocated byte for stack (excluding the space for seal values)
+ * because the hardware decrements the stack pointer before
+ * writing i.e. if stack pointer is 0x2, a push operation will
+ * decrement the stack pointer to 0x1 and then write at 0x1. */
+ xSecureContexts[ ulSecureContextIndex ].pucStackStart = pucStackMemory + ulSecureStackSize;
+
+ /* Seal the created secure process stack. */
+ *( uint32_t * )( pucStackMemory + ulSecureStackSize ) = securecontextSTACK_SEAL_VALUE;
+ *( uint32_t * )( pucStackMemory + ulSecureStackSize + 4 ) = securecontextSTACK_SEAL_VALUE;
+
+ /* The stack cannot go beyond this location. This value is
+ * programmed in the PSPLIM register on context switch.*/
+ xSecureContexts[ ulSecureContextIndex ].pucStackLimit = pucStackMemory;
+
+ xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = pvTaskHandle;
+
+ #if ( configENABLE_MPU == 1 )
+ {
+ /* Store the correct CONTROL value for the task on the stack.
+ * This value is programmed in the CONTROL register on
+ * context switch. */
+ pulCurrentStackPointer = ( uint32_t * ) xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+ pulCurrentStackPointer--;
+
+ if( ulIsTaskPrivileged )
+ {
+ *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED;
+ }
+ else
+ {
+ *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED;
+ }
+
+ /* Store the current stack pointer. This value is programmed in
+ * the PSP register on context switch. */
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer;
+ }
+ #else /* configENABLE_MPU */
+ {
+ /* Current SP is set to the start of the stack. This
+ * value is programmed in the PSP register on context switch. */
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+ }
+ #endif /* configENABLE_MPU */
+
+ /* Ensure to never return 0 as a valid context handle. */
+ xSecureContextHandle = ulSecureContextIndex + 1UL;
+ }
+ }
+ }
+
+ return xSecureContextHandle;
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint32_t ulIPSR, ulSecureContextIndex;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ulIPSR != 0 )
+ {
+ /* Only free if a valid context handle is passed. */
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ /* Ensure that the secure context being deleted is associated with
+ * the task. */
+ if( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle )
+ {
+ /* Free the stack space. */
+ vPortFree( xSecureContexts[ ulSecureContextIndex ].pucStackLimit );
+
+ /* Return the secure context back to the free secure contexts pool. */
+ vReturnSecureContext( ulSecureContextIndex );
+ }
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint8_t * pucStackLimit;
+ uint32_t ulSecureContextIndex;
+
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Ensure that no secure context is loaded and the task is loading its
+ * own context. */
+ if( ( pucStackLimit == securecontextNO_STACK ) &&
+ ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) )
+ {
+ SecureContext_LoadContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) );
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint8_t * pucStackLimit;
+ uint32_t ulSecureContextIndex;
+
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Ensure that task's context is loaded and the task is saving its own
+ * context. */
+ if( ( xSecureContexts[ ulSecureContextIndex ].pucStackLimit == pucStackLimit ) &&
+ ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) )
+ {
+ SecureContext_SaveContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) );
+ }
+ }
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM55/secure/secure_context.h b/Source/portable/GCC/ARM_CM55/secure/secure_context.h
new file mode 100644
index 0000000..2220ea6
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/secure/secure_context.h
@@ -0,0 +1,135 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_CONTEXT_H__
+#define __SECURE_CONTEXT_H__
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* FreeRTOS includes. */
+#include "FreeRTOSConfig.h"
+
+/**
+ * @brief PSP value when no secure context is loaded.
+ */
+#define securecontextNO_STACK 0x0
+
+/**
+ * @brief Invalid context ID.
+ */
+#define securecontextINVALID_CONTEXT_ID 0UL
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Structure to represent a secure context.
+ *
+ * @note Since stack grows down, pucStackStart is the highest address while
+ * pucStackLimit is the first address of the allocated memory.
+ */
+typedef struct SecureContext
+{
+ uint8_t * pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */
+ uint8_t * pucStackLimit; /**< Last location of the stack memory (PSPLIM). */
+ uint8_t * pucStackStart; /**< First location of the stack memory. */
+ void * pvTaskHandle; /**< Task handle of the task this context is associated with. */
+} SecureContext_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Opaque handle for a secure context.
+ */
+typedef uint32_t SecureContextHandle_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Initializes the secure context management system.
+ *
+ * PSP is set to NULL and therefore a task must allocate and load a context
+ * before calling any secure side function in the thread mode.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureContext_Init( void );
+
+/**
+ * @brief Allocates a context on the secure side.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] ulSecureStackSize Size of the stack to allocate on secure side.
+ * @param[in] ulIsTaskPrivileged 1 if the calling task is privileged, 0 otherwise.
+ *
+ * @return Opaque context handle if context is successfully allocated,
+ * securecontextINVALID_CONTEXT_ID otherwise.
+ */
+#if ( configENABLE_MPU == 1 )
+ SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ uint32_t ulIsTaskPrivileged,
+ void * pvTaskHandle );
+#else /* configENABLE_MPU */
+ SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ void * pvTaskHandle );
+#endif /* configENABLE_MPU */
+
+/**
+ * @brief Frees the given context.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] xSecureContextHandle Context handle corresponding to the
+ * context to be freed.
+ */
+void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle );
+
+/**
+ * @brief Loads the given context.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] xSecureContextHandle Context handle corresponding to the context
+ * to be loaded.
+ */
+void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle );
+
+/**
+ * @brief Saves the given context.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] xSecureContextHandle Context handle corresponding to the context
+ * to be saved.
+ */
+void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle );
+
+#endif /* __SECURE_CONTEXT_H__ */
diff --git a/Source/portable/GCC/ARM_CM55/secure/secure_context_port.c b/Source/portable/GCC/ARM_CM55/secure/secure_context_port.c
new file mode 100644
index 0000000..d70822c
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/secure/secure_context_port.c
@@ -0,0 +1,97 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Secure context includes. */
+#include "secure_context.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ) __attribute__( ( naked ) );
+void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ) __attribute__( ( naked ) );
+
+void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext )
+{
+ /* pxSecureContext value is in r0. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r1, ipsr \n" /* r1 = IPSR. */
+ " cbz r1, load_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */
+ " ldmia r0!, {r1, r2} \n" /* r1 = pxSecureContext->pucCurrentStackPointer, r2 = pxSecureContext->pucStackLimit. */
+ " \n"
+ #if ( configENABLE_MPU == 1 )
+ " ldmia r1!, {r3} \n" /* Read CONTROL register value from task's stack. r3 = CONTROL. */
+ " msr control, r3 \n" /* CONTROL = r3. */
+ #endif /* configENABLE_MPU */
+ " \n"
+ " msr psplim, r2 \n" /* PSPLIM = r2. */
+ " msr psp, r1 \n" /* PSP = r1. */
+ " \n"
+ " load_ctx_therad_mode: \n"
+ " bx lr \n"
+ " \n"
+ ::: "r0", "r1", "r2"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext )
+{
+ /* pxSecureContext value is in r0. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r1, ipsr \n" /* r1 = IPSR. */
+ " cbz r1, save_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */
+ " mrs r1, psp \n" /* r1 = PSP. */
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " vstmdb r1!, {s0} \n" /* Trigger the deferred stacking of FPU registers. */
+ " vldmia r1!, {s0} \n" /* Nullify the effect of the previous statement. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ #if ( configENABLE_MPU == 1 )
+ " mrs r2, control \n" /* r2 = CONTROL. */
+ " stmdb r1!, {r2} \n" /* Store CONTROL value on the stack. */
+ #endif /* configENABLE_MPU */
+ " \n"
+ " str r1, [r0] \n" /* Save the top of stack in context. pxSecureContext->pucCurrentStackPointer = r1. */
+ " movs r1, %0 \n" /* r1 = securecontextNO_STACK. */
+ " msr psplim, r1 \n" /* PSPLIM = securecontextNO_STACK. */
+ " msr psp, r1 \n" /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */
+ " \n"
+ " save_ctx_therad_mode: \n"
+ " bx lr \n"
+ " \n"
+ ::"i" ( securecontextNO_STACK ) : "r1", "memory"
+ );
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM55/secure/secure_heap.c b/Source/portable/GCC/ARM_CM55/secure/secure_heap.c
new file mode 100644
index 0000000..19f7c23
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/secure/secure_heap.c
@@ -0,0 +1,454 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Secure context heap includes. */
+#include "secure_heap.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief Total heap size.
+ */
+#ifndef secureconfigTOTAL_HEAP_SIZE
+ #define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) )
+#endif
+
+/* No test marker by default. */
+#ifndef mtCOVERAGE_TEST_MARKER
+ #define mtCOVERAGE_TEST_MARKER()
+#endif
+
+/* No tracing by default. */
+#ifndef traceMALLOC
+ #define traceMALLOC( pvReturn, xWantedSize )
+#endif
+
+/* No tracing by default. */
+#ifndef traceFREE
+ #define traceFREE( pv, xBlockSize )
+#endif
+
+/* Block sizes must not get too small. */
+#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) )
+
+/* Assumes 8bit bytes! */
+#define secureheapBITS_PER_BYTE ( ( size_t ) 8 )
+/*-----------------------------------------------------------*/
+
+/* Allocate the memory for the heap. */
+#if ( configAPPLICATION_ALLOCATED_HEAP == 1 )
+
+/* The application writer has already defined the array used for the RTOS
+* heap - probably so it can be placed in a special segment or address. */
+ extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+#else /* configAPPLICATION_ALLOCATED_HEAP */
+ static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+#endif /* configAPPLICATION_ALLOCATED_HEAP */
+
+/**
+ * @brief The linked list structure.
+ *
+ * This is used to link free blocks in order of their memory address.
+ */
+typedef struct A_BLOCK_LINK
+{
+ struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */
+ size_t xBlockSize; /**< The size of the free block. */
+} BlockLink_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Called automatically to setup the required heap structures the first
+ * time pvPortMalloc() is called.
+ */
+static void prvHeapInit( void );
+
+/**
+ * @brief Inserts a block of memory that is being freed into the correct
+ * position in the list of free memory blocks.
+ *
+ * The block being freed will be merged with the block in front of it and/or the
+ * block behind it if the memory blocks are adjacent to each other.
+ *
+ * @param[in] pxBlockToInsert The block being freed.
+ */
+static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert );
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The size of the structure placed at the beginning of each allocated
+ * memory block must by correctly byte aligned.
+ */
+static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( secureportBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+
+/**
+ * @brief Create a couple of list links to mark the start and end of the list.
+ */
+static BlockLink_t xStart;
+static BlockLink_t * pxEnd = NULL;
+
+/**
+ * @brief Keeps track of the number of free bytes remaining, but says nothing
+ * about fragmentation.
+ */
+static size_t xFreeBytesRemaining = 0U;
+static size_t xMinimumEverFreeBytesRemaining = 0U;
+
+/**
+ * @brief Gets set to the top bit of an size_t type.
+ *
+ * When this bit in the xBlockSize member of an BlockLink_t structure is set
+ * then the block belongs to the application. When the bit is free the block is
+ * still part of the free heap space.
+ */
+static size_t xBlockAllocatedBit = 0;
+/*-----------------------------------------------------------*/
+
+static void prvHeapInit( void )
+{
+ BlockLink_t * pxFirstFreeBlock;
+ uint8_t * pucAlignedHeap;
+ size_t uxAddress;
+ size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE;
+
+ /* Ensure the heap starts on a correctly aligned boundary. */
+ uxAddress = ( size_t ) ucHeap;
+
+ if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 )
+ {
+ uxAddress += ( secureportBYTE_ALIGNMENT - 1 );
+ uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+ xTotalHeapSize -= uxAddress - ( size_t ) ucHeap;
+ }
+
+ pucAlignedHeap = ( uint8_t * ) uxAddress;
+
+ /* xStart is used to hold a pointer to the first item in the list of free
+ * blocks. The void cast is used to prevent compiler warnings. */
+ xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
+ xStart.xBlockSize = ( size_t ) 0;
+
+ /* pxEnd is used to mark the end of the list of free blocks and is inserted
+ * at the end of the heap space. */
+ uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize;
+ uxAddress -= xHeapStructSize;
+ uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+ pxEnd = ( void * ) uxAddress;
+ pxEnd->xBlockSize = 0;
+ pxEnd->pxNextFreeBlock = NULL;
+
+ /* To start with there is a single free block that is sized to take up the
+ * entire heap space, minus the space taken by pxEnd. */
+ pxFirstFreeBlock = ( void * ) pucAlignedHeap;
+ pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock;
+ pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
+
+ /* Only one block exists - and it covers the entire usable heap space. */
+ xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+ xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+
+ /* Work out the position of the top bit in a size_t variable. */
+ xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 );
+}
+/*-----------------------------------------------------------*/
+
+static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert )
+{
+ BlockLink_t * pxIterator;
+ uint8_t * puc;
+
+ /* Iterate through the list until a block is found that has a higher address
+ * than the block being inserted. */
+ for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
+ {
+ /* Nothing to do here, just iterate to the right position. */
+ }
+
+ /* Do the block being inserted, and the block it is being inserted after
+ * make a contiguous block of memory? */
+ puc = ( uint8_t * ) pxIterator;
+
+ if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
+ {
+ pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
+ pxBlockToInsert = pxIterator;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* Do the block being inserted, and the block it is being inserted before
+ * make a contiguous block of memory? */
+ puc = ( uint8_t * ) pxBlockToInsert;
+
+ if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
+ {
+ if( pxIterator->pxNextFreeBlock != pxEnd )
+ {
+ /* Form one big block from the two blocks. */
+ pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
+ pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
+ }
+ else
+ {
+ pxBlockToInsert->pxNextFreeBlock = pxEnd;
+ }
+ }
+ else
+ {
+ pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
+ }
+
+ /* If the block being inserted plugged a gap, and so was merged with the block
+ * before and the block after, then its pxNextFreeBlock pointer will have
+ * already been set, and should not be set here as that would make it point
+ * to itself. */
+ if( pxIterator != pxBlockToInsert )
+ {
+ pxIterator->pxNextFreeBlock = pxBlockToInsert;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+}
+/*-----------------------------------------------------------*/
+
+void * pvPortMalloc( size_t xWantedSize )
+{
+ BlockLink_t * pxBlock;
+ BlockLink_t * pxPreviousBlock;
+ BlockLink_t * pxNewBlockLink;
+ void * pvReturn = NULL;
+
+ /* If this is the first call to malloc then the heap will require
+ * initialisation to setup the list of free blocks. */
+ if( pxEnd == NULL )
+ {
+ prvHeapInit();
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* Check the requested block size is not so large that the top bit is set.
+ * The top bit of the block size member of the BlockLink_t structure is used
+ * to determine who owns the block - the application or the kernel, so it
+ * must be free. */
+ if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
+ {
+ /* The wanted size is increased so it can contain a BlockLink_t
+ * structure in addition to the requested amount of bytes. */
+ if( xWantedSize > 0 )
+ {
+ xWantedSize += xHeapStructSize;
+
+ /* Ensure that blocks are always aligned to the required number of
+ * bytes. */
+ if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 )
+ {
+ /* Byte alignment required. */
+ xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) );
+ secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 );
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
+ {
+ /* Traverse the list from the start (lowest address) block until
+ * one of adequate size is found. */
+ pxPreviousBlock = &xStart;
+ pxBlock = xStart.pxNextFreeBlock;
+
+ while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
+ {
+ pxPreviousBlock = pxBlock;
+ pxBlock = pxBlock->pxNextFreeBlock;
+ }
+
+ /* If the end marker was reached then a block of adequate size was
+ * not found. */
+ if( pxBlock != pxEnd )
+ {
+ /* Return the memory space pointed to - jumping over the
+ * BlockLink_t structure at its start. */
+ pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize );
+
+ /* This block is being returned for use so must be taken out
+ * of the list of free blocks. */
+ pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
+
+ /* If the block is larger than required it can be split into
+ * two. */
+ if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE )
+ {
+ /* This block is to be split into two. Create a new
+ * block following the number of bytes requested. The void
+ * cast is used to prevent byte alignment warnings from the
+ * compiler. */
+ pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
+ secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 );
+
+ /* Calculate the sizes of two blocks split from the single
+ * block. */
+ pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
+ pxBlock->xBlockSize = xWantedSize;
+
+ /* Insert the new block into the list of free blocks. */
+ prvInsertBlockIntoFreeList( pxNewBlockLink );
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ xFreeBytesRemaining -= pxBlock->xBlockSize;
+
+ if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
+ {
+ xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* The block is being returned - it is allocated and owned by
+ * the application and has no "next" block. */
+ pxBlock->xBlockSize |= xBlockAllocatedBit;
+ pxBlock->pxNextFreeBlock = NULL;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ traceMALLOC( pvReturn, xWantedSize );
+
+ #if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 )
+ {
+ if( pvReturn == NULL )
+ {
+ extern void vApplicationMallocFailedHook( void );
+ vApplicationMallocFailedHook();
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) */
+
+ secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 );
+ return pvReturn;
+}
+/*-----------------------------------------------------------*/
+
+void vPortFree( void * pv )
+{
+ uint8_t * puc = ( uint8_t * ) pv;
+ BlockLink_t * pxLink;
+
+ if( pv != NULL )
+ {
+        /* The memory being freed will have a BlockLink_t structure immediately
+ * before it. */
+ puc -= xHeapStructSize;
+
+ /* This casting is to keep the compiler from issuing warnings. */
+ pxLink = ( void * ) puc;
+
+ /* Check the block is actually allocated. */
+ secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
+ secureportASSERT( pxLink->pxNextFreeBlock == NULL );
+
+ if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
+ {
+ if( pxLink->pxNextFreeBlock == NULL )
+ {
+ /* The block is being returned to the heap - it is no longer
+ * allocated. */
+ pxLink->xBlockSize &= ~xBlockAllocatedBit;
+
+ secureportDISABLE_NON_SECURE_INTERRUPTS();
+ {
+ /* Add this block to the list of free blocks. */
+ xFreeBytesRemaining += pxLink->xBlockSize;
+ traceFREE( pv, pxLink->xBlockSize );
+ prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
+ }
+ secureportENABLE_NON_SECURE_INTERRUPTS();
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+size_t xPortGetFreeHeapSize( void )
+{
+ return xFreeBytesRemaining;
+}
+/*-----------------------------------------------------------*/
+
+size_t xPortGetMinimumEverFreeHeapSize( void )
+{
+ return xMinimumEverFreeBytesRemaining;
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM55/secure/secure_heap.h b/Source/portable/GCC/ARM_CM55/secure/secure_heap.h
new file mode 100644
index 0000000..75c9cb0
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/secure/secure_heap.h
@@ -0,0 +1,66 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_HEAP_H__
+#define __SECURE_HEAP_H__
+
+/* Standard includes. */
+#include <stdlib.h>
+
+/**
+ * @brief Allocates memory from heap.
+ *
+ * @param[in] xWantedSize The size of the memory to be allocated.
+ *
+ * @return Pointer to the memory region if the allocation is successful, NULL
+ * otherwise.
+ */
+void * pvPortMalloc( size_t xWantedSize );
+
+/**
+ * @brief Frees the previously allocated memory.
+ *
+ * @param[in] pv Pointer to the memory to be freed.
+ */
+void vPortFree( void * pv );
+
+/**
+ * @brief Get the free heap size.
+ *
+ * @return Free heap size.
+ */
+size_t xPortGetFreeHeapSize( void );
+
+/**
+ * @brief Get the minimum ever free heap size.
+ *
+ * @return Minimum ever free heap size.
+ */
+size_t xPortGetMinimumEverFreeHeapSize( void );
+
+#endif /* __SECURE_HEAP_H__ */
diff --git a/Source/portable/GCC/ARM_CM55/secure/secure_init.c b/Source/portable/GCC/ARM_CM55/secure/secure_init.c
new file mode 100644
index 0000000..f93bfce
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/secure/secure_init.c
@@ -0,0 +1,106 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Secure init includes. */
+#include "secure_init.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */
+#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL )
+#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS )
+#define secureinitSCB_AIRCR_PRIS_POS ( 14UL )
+#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS )
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define secureinitFPCCR_LSPENS_POS ( 29UL )
+#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS )
+#define secureinitFPCCR_TS_POS ( 26UL )
+#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS )
+
+#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */
+#define secureinitNSACR_CP10_POS ( 10UL )
+#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS )
+#define secureinitNSACR_CP11_POS ( 11UL )
+#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS )
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void )
+{
+ uint32_t ulIPSR;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ulIPSR != 0 )
+ {
+ *( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) |
+ ( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) |
+ ( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK );
+ }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void )
+{
+ uint32_t ulIPSR;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ulIPSR != 0 )
+ {
+ /* CP10 = 1 ==> Non-secure access to the Floating Point Unit is
+ * permitted. CP11 should be programmed to the same value as CP10. */
+ *( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK );
+
+        /* LSPENS = 0 ==> LSPEN is writable from non-secure state. This ensures
+ * that we can enable/disable lazy stacking in port.c file. */
+ *( secureinitFPCCR ) &= ~( secureinitFPCCR_LSPENS_MASK );
+
+ /* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP
+ * registers (S16-S31) are also pushed to stack on exception entry and
+ * restored on exception return. */
+ *( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK );
+ }
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM55/secure/secure_init.h b/Source/portable/GCC/ARM_CM55/secure/secure_init.h
new file mode 100644
index 0000000..e6c9da0
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/secure/secure_init.h
@@ -0,0 +1,54 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_INIT_H__
+#define __SECURE_INIT_H__
+
+/**
+ * @brief De-prioritizes the non-secure exceptions.
+ *
+ * This is needed to ensure that the non-secure PendSV runs at the lowest
+ * priority. Context switch is done in the non-secure PendSV handler.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureInit_DePrioritizeNSExceptions( void );
+
+/**
+ * @brief Sets up the Floating Point Unit (FPU) for Non-Secure access.
+ *
+ * Also sets FPCCR.TS=1 to ensure that the content of the Floating Point
+ * Registers are not leaked to the non-secure side.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureInit_EnableNSFPUAccess( void );
+
+#endif /* __SECURE_INIT_H__ */
diff --git a/Source/portable/GCC/ARM_CM55/secure/secure_port_macros.h b/Source/portable/GCC/ARM_CM55/secure/secure_port_macros.h
new file mode 100644
index 0000000..d7ac583
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55/secure/secure_port_macros.h
@@ -0,0 +1,140 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_PORT_MACROS_H__
+#define __SECURE_PORT_MACROS_H__
+
+/**
+ * @brief Byte alignment requirements.
+ */
+#define secureportBYTE_ALIGNMENT 8
+#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 )
+
+/**
+ * @brief Macro to declare a function as non-secure callable.
+ */
+#if defined( __IAR_SYSTEMS_ICC__ )
+ #define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry __root
+#else
+ #define secureportNON_SECURE_CALLABLE __attribute__( ( cmse_nonsecure_entry ) ) __attribute__( ( used ) )
+#endif
+
+/**
+ * @brief Set the secure PRIMASK value.
+ */
+#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \
+ __asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" )
+
+/**
+ * @brief Set the non-secure PRIMASK value.
+ */
+#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \
+ __asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" )
+
+/**
+ * @brief Read the PSP value in the given variable.
+ */
+#define secureportREAD_PSP( pucOutCurrentStackPointer ) \
+ __asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) )
+
+/**
+ * @brief Set the PSP to the given value.
+ */
+#define secureportSET_PSP( pucCurrentStackPointer ) \
+ __asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) )
+
+/**
+ * @brief Read the PSPLIM value in the given variable.
+ */
+#define secureportREAD_PSPLIM( pucOutStackLimit ) \
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pucOutStackLimit ) )
+
+/**
+ * @brief Set the PSPLIM to the given value.
+ */
+#define secureportSET_PSPLIM( pucStackLimit ) \
+ __asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) )
+
+/**
+ * @brief Set the NonSecure MSP to the given value.
+ */
+#define secureportSET_MSP_NS( pucMainStackPointer ) \
+ __asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) )
+
+/**
+ * @brief Set the CONTROL register to the given value.
+ */
+#define secureportSET_CONTROL( ulControl ) \
+ __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" )
+
+/**
+ * @brief Read the Interrupt Program Status Register (IPSR) value in the given
+ * variable.
+ */
+#define secureportREAD_IPSR( ulIPSR ) \
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) )
+
+/**
+ * @brief PRIMASK value to enable interrupts.
+ */
+#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0
+
+/**
+ * @brief PRIMASK value to disable interrupts.
+ */
+#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1
+
+/**
+ * @brief Disable secure interrupts.
+ */
+#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Disable non-secure interrupts.
+ *
+ * This effectively disables context switches.
+ */
+#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Enable non-secure interrupts.
+ */
+#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Assert definition.
+ */
+#define secureportASSERT( x ) \
+ if( ( x ) == 0 ) \
+ { \
+ secureportDISABLE_SECURE_INTERRUPTS(); \
+ secureportDISABLE_NON_SECURE_INTERRUPTS(); \
+ for( ; ; ) {; } \
+ }
+
+#endif /* __SECURE_PORT_MACROS_H__ */
diff --git a/Source/portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c b/Source/portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..d247c92
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2106 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+#include "mpu_syscall_numbers.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskDelayUntil ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskAbortDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskPriorityGet ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_eTaskGetState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskGetInfo ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetIdleTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSuspend ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskResume ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ TickType_t MPU_xTaskGetTickCount( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetTickCount ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetNumberOfTasks ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTaskGetName ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimeCounter ) : "memory"
+ );
+ }
+
+    #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimePercent ) : "memory"
+ );
+ }
+
+    #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetSystemState ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark2 ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetCurrentTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetSchedulerState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetTimeOutState ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskCheckForTimeOut ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotify ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyWait ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyTake ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyStateClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyValueClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGenericSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueMessagesWaiting ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueuePeek ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSemaphoreTake ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGetMutexHolder ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueTakeMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGiveMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSelectFromSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueAddToSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueAddToRegistry ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueUnregisterQueue ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcQueueGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTimerGetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerIsTimerActive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+        __asm volatile
+        (
+            " .syntax unified                                   \n"
+            " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl    \n"
+            "                                                   \n"
+            " push {r0}                                         \n" /* Save r0 so it can be used as scratch. */
+            " mrs r0, control                                   \n"
+            " tst r0, #1                                        \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+            " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv     \n"
+            " MPU_xTimerGetTimerDaemonTaskHandle_Priv:          \n"
+            "     pop {r0}                                      \n"
+            "     b MPU_xTimerGetTimerDaemonTaskHandleImpl      \n" /* Privileged: tail-branch straight to the implementation. */
+            " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:        \n"
+            "     pop {r0}                                      \n"
+            "     svc %0                                        \n" /* Unprivileged: raise SVC to perform the system call. */
+            "                                                   \n"
+            : : "i" ( SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle ) : "memory"
+        );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: unlike the other wrappers this one is also callable from handler mode, so it checks IPSR first. */
+        __asm volatile
+        (
+            " .syntax unified                               \n"
+            " .extern MPU_xTimerGenericCommandPrivImpl      \n"
+            "                                               \n"
+            " push {r0}                                     \n" /* Save r0 so it can be used as scratch. */
+            " mrs r0, ipsr                                  \n"
+            " cmp r0, #0                                    \n"
+            " bne MPU_xTimerGenericCommand_Priv             \n" /* Non-zero IPSR: handler mode, which executes privileged. */
+            " mrs r0, control                               \n"
+            " tst r0, #1                                    \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+            " beq MPU_xTimerGenericCommand_Priv             \n" /* nPRIV clear: privileged thread mode. */
+            " MPU_xTimerGenericCommand_Unpriv:              \n"
+            "     pop {r0}                                  \n"
+            "     svc %0                                    \n" /* Unprivileged: raise SVC to perform the system call. */
+            " MPU_xTimerGenericCommand_Priv:                \n"
+            "     pop {r0}                                  \n"
+            "     b MPU_xTimerGenericCommandPrivImpl        \n" /* Privileged: tail-branch straight to the implementation. */
+            "                                               \n"
+            "                                               \n"
+            : : "i" ( SYSTEM_CALL_xTimerGenericCommand ) : "memory"
+        );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+        __asm volatile
+        (
+            " .syntax unified                         \n"
+            " .extern MPU_pcTimerGetNameImpl          \n"
+            "                                         \n"
+            " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+            " mrs r0, control                         \n"
+            " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+            " bne MPU_pcTimerGetName_Unpriv           \n"
+            " MPU_pcTimerGetName_Priv:                \n"
+            "     pop {r0}                            \n"
+            "     b MPU_pcTimerGetNameImpl            \n" /* Privileged: tail-branch straight to the implementation. */
+            " MPU_pcTimerGetName_Unpriv:              \n"
+            "     pop {r0}                            \n"
+            "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+            "                                         \n"
+            : : "i" ( SYSTEM_CALL_pcTimerGetName ) : "memory"
+        );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                                   const BaseType_t uxAutoReload ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                                   const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+        __asm volatile
+        (
+            " .syntax unified                         \n"
+            " .extern MPU_vTimerSetReloadModeImpl     \n"
+            "                                         \n"
+            " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+            " mrs r0, control                         \n"
+            " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+            " bne MPU_vTimerSetReloadMode_Unpriv      \n"
+            " MPU_vTimerSetReloadMode_Priv:           \n"
+            "     pop {r0}                            \n"
+            "     b MPU_vTimerSetReloadModeImpl       \n" /* Privileged: tail-branch straight to the implementation. */
+            " MPU_vTimerSetReloadMode_Unpriv:         \n"
+            "     pop {r0}                            \n"
+            "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+            "                                         \n"
+            : : "i" ( SYSTEM_CALL_vTimerSetReloadMode ) : "memory"
+        );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+        __asm volatile
+        (
+            " .syntax unified                         \n"
+            " .extern MPU_xTimerGetReloadModeImpl     \n"
+            "                                         \n"
+            " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+            " mrs r0, control                         \n"
+            " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+            " bne MPU_xTimerGetReloadMode_Unpriv      \n"
+            " MPU_xTimerGetReloadMode_Priv:           \n"
+            "     pop {r0}                            \n"
+            "     b MPU_xTimerGetReloadModeImpl       \n" /* Privileged: tail-branch straight to the implementation. */
+            " MPU_xTimerGetReloadMode_Unpriv:         \n"
+            "     pop {r0}                            \n"
+            "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+            "                                         \n"
+            : : "i" ( SYSTEM_CALL_xTimerGetReloadMode ) : "memory"
+        );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+        __asm volatile
+        (
+            " .syntax unified                         \n"
+            " .extern MPU_uxTimerGetReloadModeImpl    \n"
+            "                                         \n"
+            " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+            " mrs r0, control                         \n"
+            " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+            " bne MPU_uxTimerGetReloadMode_Unpriv     \n"
+            " MPU_uxTimerGetReloadMode_Priv:          \n"
+            "     pop {r0}                            \n"
+            "     b MPU_uxTimerGetReloadModeImpl      \n" /* Privileged: tail-branch straight to the implementation. */
+            " MPU_uxTimerGetReloadMode_Unpriv:        \n"
+            "     pop {r0}                            \n"
+            "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+            "                                         \n"
+            : : "i" ( SYSTEM_CALL_uxTimerGetReloadMode ) : "memory"
+        );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+        __asm volatile
+        (
+            " .syntax unified                         \n"
+            " .extern MPU_xTimerGetPeriodImpl         \n"
+            "                                         \n"
+            " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+            " mrs r0, control                         \n"
+            " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+            " bne MPU_xTimerGetPeriod_Unpriv          \n"
+            " MPU_xTimerGetPeriod_Priv:               \n"
+            "     pop {r0}                            \n"
+            "     b MPU_xTimerGetPeriodImpl           \n" /* Privileged: tail-branch straight to the implementation. */
+            " MPU_xTimerGetPeriod_Unpriv:             \n"
+            "     pop {r0}                            \n"
+            "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+            "                                         \n"
+            : : "i" ( SYSTEM_CALL_xTimerGetPeriod ) : "memory"
+        );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+        __asm volatile
+        (
+            " .syntax unified                         \n"
+            " .extern MPU_xTimerGetExpiryTimeImpl     \n"
+            "                                         \n"
+            " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+            " mrs r0, control                         \n"
+            " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+            " bne MPU_xTimerGetExpiryTime_Unpriv      \n"
+            " MPU_xTimerGetExpiryTime_Priv:           \n"
+            "     pop {r0}                            \n"
+            "     b MPU_xTimerGetExpiryTimeImpl       \n" /* Privileged: tail-branch straight to the implementation. */
+            " MPU_xTimerGetExpiryTime_Unpriv:         \n"
+            "     pop {r0}                            \n"
+            "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+            "                                         \n"
+            : : "i" ( SYSTEM_CALL_xTimerGetExpiryTime ) : "memory"
+        );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+    __asm volatile
+    (
+        " .syntax unified                         \n"
+        " .extern MPU_xEventGroupWaitBitsImpl     \n"
+        "                                         \n"
+        " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+        " mrs r0, control                         \n"
+        " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+        " bne MPU_xEventGroupWaitBits_Unpriv      \n"
+        " MPU_xEventGroupWaitBits_Priv:           \n"
+        "     pop {r0}                            \n"
+        "     b MPU_xEventGroupWaitBitsImpl       \n" /* Privileged: tail-branch straight to the implementation. */
+        " MPU_xEventGroupWaitBits_Unpriv:         \n"
+        "     pop {r0}                            \n"
+        "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+        "                                         \n"
+        : : "i" ( SYSTEM_CALL_xEventGroupWaitBits ) : "memory"
+    );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                       const EventBits_t uxBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                       const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+    __asm volatile
+    (
+        " .syntax unified                         \n"
+        " .extern MPU_xEventGroupClearBitsImpl    \n"
+        "                                         \n"
+        " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+        " mrs r0, control                         \n"
+        " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+        " bne MPU_xEventGroupClearBits_Unpriv     \n"
+        " MPU_xEventGroupClearBits_Priv:          \n"
+        "     pop {r0}                            \n"
+        "     b MPU_xEventGroupClearBitsImpl      \n" /* Privileged: tail-branch straight to the implementation. */
+        " MPU_xEventGroupClearBits_Unpriv:        \n"
+        "     pop {r0}                            \n"
+        "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+        "                                         \n"
+        : : "i" ( SYSTEM_CALL_xEventGroupClearBits ) : "memory"
+    );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+    __asm volatile
+    (
+        " .syntax unified                         \n"
+        " .extern MPU_xEventGroupSetBitsImpl      \n"
+        "                                         \n"
+        " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+        " mrs r0, control                         \n"
+        " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+        " bne MPU_xEventGroupSetBits_Unpriv       \n"
+        " MPU_xEventGroupSetBits_Priv:            \n"
+        "     pop {r0}                            \n"
+        "     b MPU_xEventGroupSetBitsImpl        \n" /* Privileged: tail-branch straight to the implementation. */
+        " MPU_xEventGroupSetBits_Unpriv:          \n"
+        "     pop {r0}                            \n"
+        "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+        "                                         \n"
+        : : "i" ( SYSTEM_CALL_xEventGroupSetBits ) : "memory"
+    );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                  const EventBits_t uxBitsToSet,
+                                  const EventBits_t uxBitsToWaitFor,
+                                  TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                  const EventBits_t uxBitsToSet,
+                                  const EventBits_t uxBitsToWaitFor,
+                                  TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+    __asm volatile
+    (
+        " .syntax unified                         \n"
+        " .extern MPU_xEventGroupSyncImpl         \n"
+        "                                         \n"
+        " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+        " mrs r0, control                         \n"
+        " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+        " bne MPU_xEventGroupSync_Unpriv          \n"
+        " MPU_xEventGroupSync_Priv:               \n"
+        "     pop {r0}                            \n"
+        "     b MPU_xEventGroupSyncImpl           \n" /* Privileged: tail-branch straight to the implementation. */
+        " MPU_xEventGroupSync_Unpriv:             \n"
+        "     pop {r0}                            \n"
+        "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+        "                                         \n"
+        : : "i" ( SYSTEM_CALL_xEventGroupSync ) : "memory"
+    );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+        __asm volatile
+        (
+            " .syntax unified                         \n"
+            " .extern MPU_uxEventGroupGetNumberImpl   \n"
+            "                                         \n"
+            " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+            " mrs r0, control                         \n"
+            " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+            " bne MPU_uxEventGroupGetNumber_Unpriv    \n"
+            " MPU_uxEventGroupGetNumber_Priv:         \n"
+            "     pop {r0}                            \n"
+            "     b MPU_uxEventGroupGetNumberImpl     \n" /* Privileged: tail-branch straight to the implementation. */
+            " MPU_uxEventGroupGetNumber_Unpriv:       \n"
+            "     pop {r0}                            \n"
+            "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+            "                                         \n"
+            : : "i" ( SYSTEM_CALL_uxEventGroupGetNumber ) : "memory"
+        );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+                                    UBaseType_t uxEventGroupNumber ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+                                    UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+        __asm volatile
+        (
+            " .syntax unified                         \n"
+            " .extern MPU_vEventGroupSetNumberImpl    \n"
+            "                                         \n"
+            " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+            " mrs r0, control                         \n"
+            " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+            " bne MPU_vEventGroupSetNumber_Unpriv     \n"
+            " MPU_vEventGroupSetNumber_Priv:          \n"
+            "     pop {r0}                            \n"
+            "     b MPU_vEventGroupSetNumberImpl      \n" /* Privileged: tail-branch straight to the implementation. */
+            " MPU_vEventGroupSetNumber_Unpriv:        \n"
+            "     pop {r0}                            \n"
+            "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+            "                                         \n"
+            : : "i" ( SYSTEM_CALL_vEventGroupSetNumber ) : "memory"
+        );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                               const void * pvTxData,
+                               size_t xDataLengthBytes,
+                               TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                               const void * pvTxData,
+                               size_t xDataLengthBytes,
+                               TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+    __asm volatile
+    (
+        " .syntax unified                         \n"
+        " .extern MPU_xStreamBufferSendImpl       \n"
+        "                                         \n"
+        " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+        " mrs r0, control                         \n"
+        " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+        " bne MPU_xStreamBufferSend_Unpriv        \n"
+        " MPU_xStreamBufferSend_Priv:             \n"
+        "     pop {r0}                            \n"
+        "     b MPU_xStreamBufferSendImpl         \n" /* Privileged: tail-branch straight to the implementation. */
+        " MPU_xStreamBufferSend_Unpriv:           \n"
+        "     pop {r0}                            \n"
+        "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+        "                                         \n"
+        : : "i" ( SYSTEM_CALL_xStreamBufferSend ) : "memory"
+    );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                                  void * pvRxData,
+                                  size_t xBufferLengthBytes,
+                                  TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                                  void * pvRxData,
+                                  size_t xBufferLengthBytes,
+                                  TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+    __asm volatile
+    (
+        " .syntax unified                         \n"
+        " .extern MPU_xStreamBufferReceiveImpl    \n"
+        "                                         \n"
+        " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+        " mrs r0, control                         \n"
+        " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+        " bne MPU_xStreamBufferReceive_Unpriv     \n"
+        " MPU_xStreamBufferReceive_Priv:          \n"
+        "     pop {r0}                            \n"
+        "     b MPU_xStreamBufferReceiveImpl      \n" /* Privileged: tail-branch straight to the implementation. */
+        " MPU_xStreamBufferReceive_Unpriv:        \n"
+        "     pop {r0}                            \n"
+        "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+        "                                         \n"
+        : : "i" ( SYSTEM_CALL_xStreamBufferReceive ) : "memory"
+    );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+    __asm volatile
+    (
+        " .syntax unified                         \n"
+        " .extern MPU_xStreamBufferIsFullImpl     \n"
+        "                                         \n"
+        " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+        " mrs r0, control                         \n"
+        " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+        " bne MPU_xStreamBufferIsFull_Unpriv      \n"
+        " MPU_xStreamBufferIsFull_Priv:           \n"
+        "     pop {r0}                            \n"
+        "     b MPU_xStreamBufferIsFullImpl       \n" /* Privileged: tail-branch straight to the implementation. */
+        " MPU_xStreamBufferIsFull_Unpriv:         \n"
+        "     pop {r0}                            \n"
+        "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+        "                                         \n"
+        : : "i" ( SYSTEM_CALL_xStreamBufferIsFull ) : "memory"
+    );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+    __asm volatile
+    (
+        " .syntax unified                         \n"
+        " .extern MPU_xStreamBufferIsEmptyImpl    \n"
+        "                                         \n"
+        " push {r0}                               \n" /* Save r0 so it can be used as scratch. */
+        " mrs r0, control                         \n"
+        " tst r0, #1                              \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+        " bne MPU_xStreamBufferIsEmpty_Unpriv     \n"
+        " MPU_xStreamBufferIsEmpty_Priv:          \n"
+        "     pop {r0}                            \n"
+        "     b MPU_xStreamBufferIsEmptyImpl      \n" /* Privileged: tail-branch straight to the implementation. */
+        " MPU_xStreamBufferIsEmpty_Unpriv:        \n"
+        "     pop {r0}                            \n"
+        "     svc %0                              \n" /* Unprivileged: raise SVC to perform the system call. */
+        "                                         \n"
+        : : "i" ( SYSTEM_CALL_xStreamBufferIsEmpty ) : "memory"
+    );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+    __asm volatile
+    (
+        " .syntax unified                               \n"
+        " .extern MPU_xStreamBufferSpacesAvailableImpl  \n"
+        "                                               \n"
+        " push {r0}                                     \n" /* Save r0 so it can be used as scratch. */
+        " mrs r0, control                               \n"
+        " tst r0, #1                                    \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+        " bne MPU_xStreamBufferSpacesAvailable_Unpriv   \n"
+        " MPU_xStreamBufferSpacesAvailable_Priv:        \n"
+        "     pop {r0}                                  \n"
+        "     b MPU_xStreamBufferSpacesAvailableImpl    \n" /* Privileged: tail-branch straight to the implementation. */
+        " MPU_xStreamBufferSpacesAvailable_Unpriv:      \n"
+        "     pop {r0}                                  \n"
+        "     svc %0                                    \n" /* Unprivileged: raise SVC to perform the system call. */
+        "                                               \n"
+        : : "i" ( SYSTEM_CALL_xStreamBufferSpacesAvailable ) : "memory"
+    );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+    __asm volatile
+    (
+        " .syntax unified                               \n"
+        " .extern MPU_xStreamBufferBytesAvailableImpl   \n"
+        "                                               \n"
+        " push {r0}                                     \n" /* Save r0 so it can be used as scratch. */
+        " mrs r0, control                               \n"
+        " tst r0, #1                                    \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+        " bne MPU_xStreamBufferBytesAvailable_Unpriv    \n"
+        " MPU_xStreamBufferBytesAvailable_Priv:         \n"
+        "     pop {r0}                                  \n"
+        "     b MPU_xStreamBufferBytesAvailableImpl     \n" /* Privileged: tail-branch straight to the implementation. */
+        " MPU_xStreamBufferBytesAvailable_Unpriv:       \n"
+        "     pop {r0}                                  \n"
+        "     svc %0                                    \n" /* Unprivileged: raise SVC to perform the system call. */
+        "                                               \n"
+        : : "i" ( SYSTEM_CALL_xStreamBufferBytesAvailable ) : "memory"
+    );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+                                              size_t xTriggerLevel ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+                                              size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+    __asm volatile
+    (
+        " .syntax unified                               \n"
+        " .extern MPU_xStreamBufferSetTriggerLevelImpl  \n"
+        "                                               \n"
+        " push {r0}                                     \n" /* Save r0 so it can be used as scratch. */
+        " mrs r0, control                               \n"
+        " tst r0, #1                                    \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+        " bne MPU_xStreamBufferSetTriggerLevel_Unpriv   \n"
+        " MPU_xStreamBufferSetTriggerLevel_Priv:        \n"
+        "     pop {r0}                                  \n"
+        "     b MPU_xStreamBufferSetTriggerLevelImpl    \n" /* Privileged: tail-branch straight to the implementation. */
+        " MPU_xStreamBufferSetTriggerLevel_Unpriv:      \n"
+        "     pop {r0}                                  \n"
+        "     svc %0                                    \n" /* Unprivileged: raise SVC to perform the system call. */
+        "                                               \n"
+        : : "i" ( SYSTEM_CALL_xStreamBufferSetTriggerLevel ) : "memory"
+    );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ { /* Naked trampoline: the inline asm below is the entire body; no compiler prologue/epilogue. */
+    __asm volatile
+    (
+        " .syntax unified                                     \n"
+        " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+        "                                                     \n"
+        " push {r0}                                           \n" /* Save r0 so it can be used as scratch. */
+        " mrs r0, control                                     \n"
+        " tst r0, #1                                          \n" /* CONTROL bit 0 (nPRIV): 0 = privileged, 1 = unprivileged. */
+        " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv  \n"
+        " MPU_xStreamBufferNextMessageLengthBytes_Priv:       \n"
+        "     pop {r0}                                        \n"
+        "     b MPU_xStreamBufferNextMessageLengthBytesImpl   \n" /* Privileged: tail-branch straight to the implementation. */
+        " MPU_xStreamBufferNextMessageLengthBytes_Unpriv:     \n"
+        "     pop {r0}                                        \n"
+        "     svc %0                                          \n" /* Unprivileged: raise SVC to perform the system call. */
+        "                                                     \n"
+        : : "i" ( SYSTEM_CALL_xStreamBufferNextMessageLengthBytes ) : "memory"
+    );
+ }
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/Source/portable/GCC/ARM_CM55_NTZ/non_secure/port.c b/Source/portable/GCC/ARM_CM55_NTZ/non_secure/port.c
new file mode 100644
index 0000000..9712ac3
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55_NTZ/non_secure/port.c
@@ -0,0 +1,2043 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+/* MPU includes. */
+#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
+
+/* Portasm includes. */
+#include "portasm.h"
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ /* Secure components includes. */
+ #include "secure_context.h"
+ #include "secure_init.h"
+#endif /* configENABLE_TRUSTZONE */
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/**
+ * The FreeRTOS Cortex M55 port can be configured to run on the Secure Side only
+ * i.e. the processor boots as secure and never jumps to the non-secure side.
+ * The Trust Zone support in the port must be disabled in order to run FreeRTOS
+ * on the secure side. The following are the valid configuration settings:
+ *
+ * 1. Run FreeRTOS on the Secure Side:
+ * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
+ *
+ * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
+ *
+ * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
+ */
+#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
+ #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the NVIC.
+ */
+#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
+#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
+#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
+#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
+#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
+#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
+#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
+#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
+#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
+#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
+#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 )
+#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
+#define portCPACR_CP10_VALUE ( 3UL )
+#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE
+#define portCPACR_CP10_POS ( 20UL )
+#define portCPACR_CP11_POS ( 22UL )
+
+#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define portFPCCR_ASPEN_POS ( 31UL )
+#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS )
+#define portFPCCR_LSPEN_POS ( 30UL )
+#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the MPU.
+ */
+#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
+#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
+#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
+
+#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
+#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
+
+#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) )
+#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) )
+
+#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) )
+#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) )
+
+#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) )
+#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) )
+
+#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) )
+#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) )
+
+#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
+#define portMPU_MAIR_ATTR0_POS ( 0UL )
+#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR1_POS ( 8UL )
+#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR2_POS ( 16UL )
+#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR3_POS ( 24UL )
+#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 )
+
+#define portMPU_MAIR_ATTR4_POS ( 0UL )
+#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR5_POS ( 8UL )
+#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR6_POS ( 16UL )
+#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR7_POS ( 24UL )
+#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 )
+
+#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL )
+
+#define portMPU_RLAR_REGION_ENABLE ( 1UL )
+
+/* Enable privileged access to unmapped region. */
+#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL )
+
+/* Enable MPU. */
+#define portMPU_ENABLE_BIT ( 1UL << 0UL )
+
+/* Expected value of the portMPU_TYPE register. */
+#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The maximum 24-bit number.
+ *
+ * It is needed because the systick is a 24-bit counter.
+ */
+#define portMAX_24_BIT_NUMBER ( 0xffffffUL )
+
+/**
+ * @brief A fiddle factor to estimate the number of SysTick counts that would
+ * have occurred while the SysTick counter is stopped during tickless idle
+ * calculations.
+ */
+#define portMISSED_COUNTS_FACTOR ( 94UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to set up the initial stack.
+ */
+#define portINITIAL_XPSR ( 0x01000000 )
+
+#if ( configRUN_FREERTOS_SECURE_ONLY == 1 )
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF FD
+ * 1111 1111 1111 1111 1111 1111 1111 1101
+ *
+ * Bit[6] - 1 --> The exception was taken from the Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 1 --> The exception was taken to the Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xfffffffd )
+#else
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF BC
+ * 1111 1111 1111 1111 1111 1111 1011 1100
+ *
+ * Bit[6] - 0 --> The exception was taken from the Non-Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 0 --> The exception was taken to the Non-Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xffffffbc )
+#endif /* configRUN_FREERTOS_SECURE_ONLY */
+
/**
 * @brief CONTROL register privileged bit mask.
 *
 * Bit[0] in CONTROL register tells the privilege:
 * Bit[0] = 0 ==> The task is privileged.
 * Bit[0] = 1 ==> The task is not privileged.
 */
#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL )

/**
 * @brief Initial CONTROL register values.
 *
 * Bit[1] (SPSEL) is set in both values so tasks execute using the process
 * stack pointer.  Bit[0] (nPRIV) is additionally set in the unprivileged
 * value - see portCONTROL_PRIVILEGED_MASK above.
 */
#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 )
#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 )

/**
 * @brief Let the user override the default SysTick clock rate. If defined by the
 * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
 * configuration register.
 */
#ifndef configSYSTICK_CLOCK_HZ
    #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ )
    /* Ensure the SysTick is clocked at the same frequency as the core. */
    #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT )
#else
    /* Select the option to clock SysTick not at the same frequency as the core. */
    #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 )
#endif

/**
 * @brief Let the user override the pre-loading of the initial LR with the
 * address of prvTaskExitError() in case it messes up unwinding of the stack
 * in the debugger.
 */
#ifdef configTASK_RETURN_ADDRESS
    #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
#else
    #define portTASK_RETURN_ADDRESS prvTaskExitError
#endif

/**
 * @brief If portPRELOAD_REGISTERS then registers will be given an initial value
 * when a task is created. This helps in debugging at the cost of code size.
 */
#define portPRELOAD_REGISTERS 1

/**
 * @brief A task is created without a secure context, and must call
 * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes
 * any secure calls.
 */
#define portNO_SECURE_CONTEXT 0
+/*-----------------------------------------------------------*/
+
/**
 * @brief Used to catch tasks that attempt to return from their implementing
 * function.
 */
static void prvTaskExitError( void );

#if ( configENABLE_MPU == 1 )

/**
 * @brief Extract MPU region's access permissions from the Region Base Address
 * Register (RBAR) value.
 *
 * @param ulRBARValue RBAR value for the MPU region.
 *
 * @return uint32_t Access permissions.
 */
    static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
#endif /* configENABLE_MPU */

#if ( configENABLE_MPU == 1 )

/**
 * @brief Setup the Memory Protection Unit (MPU).
 */
    static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
#endif /* configENABLE_MPU */

#if ( configENABLE_FPU == 1 )

/**
 * @brief Setup the Floating Point Unit (FPU).
 */
    static void prvSetupFPU( void ) PRIVILEGED_FUNCTION;
#endif /* configENABLE_FPU */

/**
 * @brief Setup the timer to generate the tick interrupts.
 *
 * The implementation in this file is weak to allow application writers to
 * change the timer used to generate the tick interrupt.
 */
void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;

/**
 * @brief Checks whether the current execution context is interrupt.
 *
 * @return pdTRUE if the current execution context is interrupt, pdFALSE
 * otherwise.
 */
BaseType_t xPortIsInsideInterrupt( void );

/**
 * @brief Yield the processor.
 *
 * Pends the PendSV exception to request a context switch.
 */
void vPortYield( void ) PRIVILEGED_FUNCTION;

/**
 * @brief Enter critical section.  Critical sections nest.
 */
void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;

/**
 * @brief Exit from critical section.  Interrupts are re-enabled only when
 * the outermost nested critical section is exited.
 */
void vPortExitCritical( void ) PRIVILEGED_FUNCTION;

/**
 * @brief SysTick handler.  Increments the RTOS tick and pends a context
 * switch when a switch is required.
 */
void SysTick_Handler( void ) PRIVILEGED_FUNCTION;

/**
 * @brief C part of SVC handler.
 *
 * @param pulCallerStackAddress Stack frame of the code that raised the SVC.
 */
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;

#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

/**
 * @brief Sets up the system call stack so that upon returning from
 * SVC, the system call stack is used.
 *
 * @param pulTaskStack The current SP when the SVC was raised.
 * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
 * @param ucSystemCallNumber The system call number of the system call.
 */
    void vSystemCallEnter( uint32_t * pulTaskStack,
                           uint32_t ulLR,
                           uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;

#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */

#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

/**
 * @brief Raise SVC for exiting from a system call.
 */
    void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;

#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */

#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

    /**
     * @brief Sets up the task stack so that upon returning from
     * SVC, the task stack is used again.
     *
     * @param pulSystemCallStack The current SP when the SVC was raised.
     * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
     */
    void vSystemCallExit( uint32_t * pulSystemCallStack,
                          uint32_t ulLR ) PRIVILEGED_FUNCTION;

#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */

#if ( configENABLE_MPU == 1 )

    /**
     * @brief Checks whether or not the calling task is privileged.
     *
     * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
     */
    BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;

#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )

/**
 * @brief This variable is set to pdTRUE when the scheduler is started.
 */
    PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;

#endif

/**
 * @brief Each task maintains its own interrupt status in the critical nesting
 * variable.
 *
 * NOTE(review): initialised to 0xaaaaaaaa, an obviously invalid count -
 * presumably reset to a real value before the scheduler runs (the running
 * value is never expected to be ~0UL; prvTaskExitError() exploits that to
 * force its assert).  Confirm where the reset happens.
 */
PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;

#if ( configENABLE_TRUSTZONE == 1 )

/**
 * @brief Saved as part of the task context to indicate which context the
 * task is using on the secure side.
 */
    PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
#endif /* configENABLE_TRUSTZONE */

/**
 * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
 * FreeRTOS API functions are not called from interrupts that have been assigned
 * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
 */
#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )

    static uint8_t ucMaxSysCallPriority = 0;
    static uint32_t ulMaxPRIGROUPValue = 0;
    static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;

#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */

#if ( configUSE_TICKLESS_IDLE == 1 )

/**
 * @brief The number of SysTick increments that make up one tick period.
 */
    PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;

/**
 * @brief The maximum number of tick periods that can be suppressed is
 * limited by the 24 bit resolution of the SysTick timer.
 */
    PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;

/**
 * @brief Compensate for the CPU cycles that pass while the SysTick is
 * stopped (low power functionality only).
 */
    PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
#if ( configUSE_TICKLESS_IDLE == 1 )

    /**
     * @brief Low power tickless idle: stop the SysTick, sleep for up to
     * xExpectedIdleTime tick periods, then restart the SysTick and step the
     * kernel's tick count forward by the number of complete tick periods
     * that elapsed while sleeping.
     *
     * Defined weak so an application can substitute its own low power
     * implementation.
     *
     * @param xExpectedIdleTime Number of tick periods the kernel expects to
     * remain idle; clamped to xMaximumPossibleSuppressedTicks.
     */
    __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
    {
        uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
        TickType_t xModifiableIdleTime;

        /* Make sure the SysTick reload value does not overflow the counter. */
        if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
        {
            xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
        }

        /* Enter a critical section but don't use the taskENTER_CRITICAL()
         * method as that will mask interrupts that should exit sleep mode. */
        __asm volatile ( "cpsid i" ::: "memory" );
        __asm volatile ( "dsb" );
        __asm volatile ( "isb" );

        /* If a context switch is pending or a task is waiting for the scheduler
         * to be unsuspended then abandon the low power entry. */
        if( eTaskConfirmSleepModeStatus() == eAbortSleep )
        {
            /* Re-enable interrupts - see comments above the cpsid instruction
             * above. */
            __asm volatile ( "cpsie i" ::: "memory" );
        }
        else
        {
            /* Stop the SysTick momentarily. The time the SysTick is stopped for
             * is accounted for as best it can be, but using the tickless mode will
             * inevitably result in some tiny drift of the time maintained by the
             * kernel with respect to calendar time. */
            portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );

            /* Use the SysTick current-value register to determine the number of
             * SysTick decrements remaining until the next tick interrupt. If the
             * current-value register is zero, then there are actually
             * ulTimerCountsForOneTick decrements remaining, not zero, because the
             * SysTick requests the interrupt when decrementing from 1 to 0. */
            ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;

            if( ulSysTickDecrementsLeft == 0 )
            {
                ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
            }

            /* Calculate the reload value required to wait xExpectedIdleTime
             * tick periods. -1 is used because this code normally executes part
             * way through the first tick period. But if the SysTick IRQ is now
             * pending, then clear the IRQ, suppressing the first tick, and correct
             * the reload value to reflect that the second tick period is already
             * underway. The expected idle time is always at least two ticks. */
            ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );

            if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
            {
                portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
                ulReloadValue -= ulTimerCountsForOneTick;
            }

            if( ulReloadValue > ulStoppedTimerCompensation )
            {
                ulReloadValue -= ulStoppedTimerCompensation;
            }

            /* Set the new reload value. */
            portNVIC_SYSTICK_LOAD_REG = ulReloadValue;

            /* Clear the SysTick count flag and set the count value back to
             * zero. */
            portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;

            /* Restart SysTick. */
            portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;

            /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
             * set its parameter to 0 to indicate that its implementation contains
             * its own wait for interrupt or wait for event instruction, and so wfi
             * should not be executed again. However, the original expected idle
             * time variable must remain unmodified, so a copy is taken. */
            xModifiableIdleTime = xExpectedIdleTime;
            configPRE_SLEEP_PROCESSING( xModifiableIdleTime );

            if( xModifiableIdleTime > 0 )
            {
                __asm volatile ( "dsb" ::: "memory" );
                __asm volatile ( "wfi" );
                __asm volatile ( "isb" );
            }

            configPOST_SLEEP_PROCESSING( xExpectedIdleTime );

            /* Re-enable interrupts to allow the interrupt that brought the MCU
             * out of sleep mode to execute immediately. See comments above
             * the cpsid instruction above. */
            __asm volatile ( "cpsie i" ::: "memory" );
            __asm volatile ( "dsb" );
            __asm volatile ( "isb" );

            /* Disable interrupts again because the clock is about to be stopped
             * and interrupts that execute while the clock is stopped will increase
             * any slippage between the time maintained by the RTOS and calendar
             * time. */
            __asm volatile ( "cpsid i" ::: "memory" );
            __asm volatile ( "dsb" );
            __asm volatile ( "isb" );

            /* Disable the SysTick clock without reading the
             * portNVIC_SYSTICK_CTRL_REG register to ensure the
             * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
             * the time the SysTick is stopped for is accounted for as best it can
             * be, but using the tickless mode will inevitably result in some tiny
             * drift of the time maintained by the kernel with respect to calendar
             * time. */
            portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );

            /* Determine whether the SysTick has already counted to zero. */
            if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
            {
                uint32_t ulCalculatedLoadValue;

                /* The tick interrupt ended the sleep (or is now pending), and
                 * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG
                 * with whatever remains of the new tick period. */
                ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );

                /* Don't allow a tiny value, or values that have somehow
                 * underflowed because the post sleep hook did something
                 * that took too long or because the SysTick current-value register
                 * is zero. */
                if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
                {
                    ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
                }

                portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;

                /* As the pending tick will be processed as soon as this
                 * function exits, the tick value maintained by the tick is stepped
                 * forward by one less than the time spent waiting. */
                ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
            }
            else
            {
                /* Something other than the tick interrupt ended the sleep. */

                /* Use the SysTick current-value register to determine the
                 * number of SysTick decrements remaining until the expected idle
                 * time would have ended. */
                ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
                #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
                {
                    /* If the SysTick is not using the core clock, the current-
                     * value register might still be zero here. In that case, the
                     * SysTick didn't load from the reload register, and there are
                     * ulReloadValue decrements remaining in the expected idle
                     * time, not zero. */
                    if( ulSysTickDecrementsLeft == 0 )
                    {
                        ulSysTickDecrementsLeft = ulReloadValue;
                    }
                }
                #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */

                /* Work out how long the sleep lasted rounded to complete tick
                 * periods (not the ulReload value which accounted for part
                 * ticks). */
                ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;

                /* How many complete tick periods passed while the processor
                 * was waiting? */
                ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;

                /* The reload value is set to whatever fraction of a single tick
                 * period remains. */
                portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
            }

            /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
             * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If
             * the SysTick is not using the core clock, temporarily configure it to
             * use the core clock. This configuration forces the SysTick to load
             * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
             * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready
             * to receive the standard value immediately. */
            portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
            portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
            #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
            {
                portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
            }
            #else
            {
                /* The temporary usage of the core clock has served its purpose,
                 * as described above. Resume usage of the other clock. */
                portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;

                if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
                {
                    /* The partial tick period already ended. Be sure the SysTick
                     * counts it only once. */
                    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
                }

                portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
                portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
            }
            #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */

            /* Step the tick to account for any tick periods that elapsed. */
            vTaskStepTick( ulCompleteTickPeriods );

            /* Exit with interrupts enabled. */
            __asm volatile ( "cpsie i" ::: "memory" );
        }
    }
#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
/**
 * @brief Configure the SysTick to generate the tick interrupt at
 * configTICK_RATE_HZ.  Weak so an application can supply its own
 * implementation that uses a different timer.
 */
__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
{
    /* Calculate the constants required to configure the tick interrupt. */
    #if ( configUSE_TICKLESS_IDLE == 1 )
    {
        ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
        xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
        ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
    }
    #endif /* configUSE_TICKLESS_IDLE */

    /* Stop and reset the SysTick. */
    portNVIC_SYSTICK_CTRL_REG = 0UL;
    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;

    /* Configure SysTick to interrupt at the requested rate.  The reload value
     * is one less than the count per tick because the counter counts down to
     * zero inclusive. */
    portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
    portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
}
+/*-----------------------------------------------------------*/
+
+static void prvTaskExitError( void )
+{
+ volatile uint32_t ulDummy = 0UL;
+
+ /* A function that implements a task must not exit or attempt to return to
+ * its caller as there is nothing to return to. If a task wants to exit it
+ * should instead call vTaskDelete( NULL ). Artificially force an assert()
+ * to be triggered if configASSERT() is defined, then stop here so
+ * application writers can catch the error. */
+ configASSERT( ulCriticalNesting == ~0UL );
+ portDISABLE_INTERRUPTS();
+
+ while( ulDummy == 0 )
+ {
+ /* This file calls prvTaskExitError() after the scheduler has been
+ * started to remove a compiler warning about the function being
+ * defined but never called. ulDummy is used purely to quieten other
+ * warnings about code appearing after this function is called - making
+ * ulDummy volatile makes the compiler think the function could return
+ * and therefore not output an 'unreachable code' warning for code that
+ * appears after it. */
+ }
+}
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
    static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
    {
        /* Translate the access-permission bits of an MPU RBAR value into the
         * task-level tskMPU_* permission flags.  Any region that is neither
         * read-only nor read-write (i.e. a privileged-only region) grants no
         * unprivileged permissions. */
        const uint32_t ulAccessPermissionBits = ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK );
        uint32_t ulAccessPermissions;

        if( ulAccessPermissionBits == portMPU_REGION_READ_ONLY )
        {
            ulAccessPermissions = tskMPU_READ_PERMISSION;
        }
        else if( ulAccessPermissionBits == portMPU_REGION_READ_WRITE )
        {
            ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
        }
        else
        {
            ulAccessPermissions = 0;
        }

        return ulAccessPermissions;
    }
#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
    /* Program the fixed kernel MPU regions (privileged flash, unprivileged
     * flash, syscall flash and privileged SRAM) and enable the MPU with a
     * privileged background region. */
    static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
    {
        #if defined( __ARMCC_VERSION )
            /* Declaration when these variables are defined in code instead of being
             * exported from linker scripts. */
            extern uint32_t * __privileged_functions_start__;
            extern uint32_t * __privileged_functions_end__;
            extern uint32_t * __syscalls_flash_start__;
            extern uint32_t * __syscalls_flash_end__;
            extern uint32_t * __unprivileged_flash_start__;
            extern uint32_t * __unprivileged_flash_end__;
            extern uint32_t * __privileged_sram_start__;
            extern uint32_t * __privileged_sram_end__;
        #else /* if defined( __ARMCC_VERSION ) */
            /* Declaration when these variables are exported from linker scripts. */
            extern uint32_t __privileged_functions_start__[];
            extern uint32_t __privileged_functions_end__[];
            extern uint32_t __syscalls_flash_start__[];
            extern uint32_t __syscalls_flash_end__[];
            extern uint32_t __unprivileged_flash_start__[];
            extern uint32_t __unprivileged_flash_end__[];
            extern uint32_t __privileged_sram_start__[];
            extern uint32_t __privileged_sram_end__[];
        #endif /* defined( __ARMCC_VERSION ) */

        /* The only permitted number of regions are 8 or 16. */
        configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) );

        /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */
        configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );

        /* Check that the MPU is present. */
        if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
        {
            /* MAIR0 - Index 0. */
            portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
            /* MAIR0 - Index 1. */
            portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );

            /* Setup privileged flash as Read Only so that privileged tasks can
             * read it but not modify. */
            portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION;
            portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
                               ( portMPU_REGION_NON_SHAREABLE ) |
                               ( portMPU_REGION_PRIVILEGED_READ_ONLY );
            portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
                               ( portMPU_RLAR_ATTR_INDEX0 ) |
                               ( portMPU_RLAR_REGION_ENABLE );

            /* Setup unprivileged flash as Read Only by both privileged and
             * unprivileged tasks. All tasks can read it but no-one can modify. */
            portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION;
            portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
                               ( portMPU_REGION_NON_SHAREABLE ) |
                               ( portMPU_REGION_READ_ONLY );
            portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
                               ( portMPU_RLAR_ATTR_INDEX0 ) |
                               ( portMPU_RLAR_REGION_ENABLE );

            /* Setup unprivileged syscalls flash as Read Only by both privileged
             * and unprivileged tasks. All tasks can read it but no-one can modify. */
            portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION;
            portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
                               ( portMPU_REGION_NON_SHAREABLE ) |
                               ( portMPU_REGION_READ_ONLY );
            portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
                               ( portMPU_RLAR_ATTR_INDEX0 ) |
                               ( portMPU_RLAR_REGION_ENABLE );

            /* Setup RAM containing kernel data for privileged access only. */
            portMPU_RNR_REG = portPRIVILEGED_RAM_REGION;
            portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
                               ( portMPU_REGION_NON_SHAREABLE ) |
                               ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
                               ( portMPU_REGION_EXECUTE_NEVER );
            portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
                               ( portMPU_RLAR_ATTR_INDEX0 ) |
                               ( portMPU_RLAR_REGION_ENABLE );

            /* Enable mem fault. */
            portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT;

            /* Enable MPU with privileged background access i.e. unmapped
             * regions have privileged access. */
            portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
        }
    }
#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_FPU == 1 )
    /* Grant full (privileged and unprivileged) access to the FPU and enable
     * automatic + lazy stacking of the floating point context on exception
     * entry/exit. */
    static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */
    {
        #if ( configENABLE_TRUSTZONE == 1 )
        {
            /* Enable non-secure access to the FPU. */
            SecureInit_EnableNSFPUAccess();
        }
        #endif /* configENABLE_TRUSTZONE */

        /* CP10 = 11 ==> Full access to FPU i.e. both privileged and
         * unprivileged code should be able to access FPU. CP11 should be
         * programmed to the same value as CP10. */
        *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) |
                            ( portCPACR_CP11_VALUE << portCPACR_CP11_POS )
                            );

        /* ASPEN = 1 ==> Hardware should automatically preserve floating point
         * context on exception entry and restore on exception return.
         * LSPEN = 1 ==> Enable lazy context save of FP state. */
        *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK );
    }
#endif /* configENABLE_FPU */
+/*-----------------------------------------------------------*/
+
/* Request a context switch.  The switch itself happens in the PendSV handler
 * on exception entry/return. */
void vPortYield( void ) /* PRIVILEGED_FUNCTION */
{
    /* Set a PendSV to request a context switch. */
    portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;

    /* Barriers are normally not required but do ensure the code is
     * completely within the specified behaviour for the architecture. */
    __asm volatile ( "dsb" ::: "memory" );
    __asm volatile ( "isb" );
}
+/*-----------------------------------------------------------*/
+
/* Enter a critical section.  Critical sections nest - interrupts are only
 * re-enabled when the matching outermost vPortExitCritical() is called.
 * Interrupts are masked BEFORE the nesting count is incremented so the
 * update of ulCriticalNesting cannot be preempted. */
void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
{
    portDISABLE_INTERRUPTS();
    ulCriticalNesting++;

    /* Barriers are normally not required but do ensure the code is
     * completely within the specified behaviour for the architecture. */
    __asm volatile ( "dsb" ::: "memory" );
    __asm volatile ( "isb" );
}
+/*-----------------------------------------------------------*/
+
+void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+ configASSERT( ulCriticalNesting );
+ ulCriticalNesting--;
+
+ if( ulCriticalNesting == 0 )
+ {
+ portENABLE_INTERRUPTS();
+ }
+}
+/*-----------------------------------------------------------*/
+
+void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulPreviousMask;
+
+ ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();
+ {
+ /* Increment the RTOS tick. */
+ if( xTaskIncrementTick() != pdFALSE )
+ {
+ /* Pend a context switch. */
+ portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+ }
+ }
+ portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
+}
+/*-----------------------------------------------------------*/
+
/**
 * @brief C part of the SVC handler - dispatches on the SVC number encoded in
 * the SVC instruction that raised the exception.
 *
 * @param pulCallerStackAddress Stacked exception frame of the code that
 * raised the SVC.
 */
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
        #if defined( __ARMCC_VERSION )
            /* Declaration when these variables are defined in code instead of being
             * exported from linker scripts. */
            extern uint32_t * __syscalls_flash_start__;
            extern uint32_t * __syscalls_flash_end__;
        #else
            /* Declaration when these variables are exported from linker scripts. */
            extern uint32_t __syscalls_flash_start__[];
            extern uint32_t __syscalls_flash_end__[];
        #endif /* defined( __ARMCC_VERSION ) */
    #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */

    uint32_t ulPC;

    #if ( configENABLE_TRUSTZONE == 1 )
        uint32_t ulR0, ulR1;
        extern TaskHandle_t pxCurrentTCB;
        #if ( configENABLE_MPU == 1 )
            uint32_t ulControl, ulIsTaskPrivileged;
        #endif /* configENABLE_MPU */
    #endif /* configENABLE_TRUSTZONE */
    uint8_t ucSVCNumber;

    /* Registers are stored on the stack in the following order - R0, R1, R2, R3,
     * R12, LR, PC, xPSR. */
    ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];

    /* The SVC number is the low byte of the SVC instruction itself, which
     * sits two bytes behind the stacked return address. */
    ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];

    switch( ucSVCNumber )
    {
        #if ( configENABLE_TRUSTZONE == 1 )
            case portSVC_ALLOCATE_SECURE_CONTEXT:

                /* R0 contains the stack size passed as parameter to the
                 * vPortAllocateSecureContext function. */
                ulR0 = pulCallerStackAddress[ 0 ];

                #if ( configENABLE_MPU == 1 )
                {
                    /* Read the CONTROL register value. */
                    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

                    /* The task that raised the SVC is privileged if Bit[0]
                     * in the CONTROL register is 0. */
                    ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 );

                    /* Allocate and load a context for the secure task. */
                    xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB );
                }
                #else /* if ( configENABLE_MPU == 1 ) */
                {
                    /* Allocate and load a context for the secure task. */
                    xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB );
                }
                #endif /* configENABLE_MPU */

                configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID );
                SecureContext_LoadContext( xSecureContext, pxCurrentTCB );
                break;

            case portSVC_FREE_SECURE_CONTEXT:

                /* R0 contains TCB being freed and R1 contains the secure
                 * context handle to be freed. */
                ulR0 = pulCallerStackAddress[ 0 ];
                ulR1 = pulCallerStackAddress[ 1 ];

                /* Free the secure context. */
                SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 );
                break;
        #endif /* configENABLE_TRUSTZONE */

        case portSVC_START_SCHEDULER:
            #if ( configENABLE_TRUSTZONE == 1 )
            {
                /* De-prioritize the non-secure exceptions so that the
                 * non-secure pendSV runs at the lowest priority. */
                SecureInit_DePrioritizeNSExceptions();

                /* Initialize the secure context management system. */
                SecureContext_Init();
            }
            #endif /* configENABLE_TRUSTZONE */

            #if ( configENABLE_FPU == 1 )
            {
                /* Setup the Floating Point Unit (FPU). */
                prvSetupFPU();
            }
            #endif /* configENABLE_FPU */

            /* Setup the context of the first task so that the first task starts
             * executing. */
            vRestoreContextOfFirstTask();
            break;

        #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
            case portSVC_RAISE_PRIVILEGE:

                /* Only raise the privilege, if the svc was raised from any of
                 * the system calls. */
                if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
                    ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
                {
                    vRaisePrivilege();
                }
                break;
        #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */

        default:
            /* Incorrect SVC call. */
            configASSERT( pdFALSE );
    }
}
+/*-----------------------------------------------------------*/
+
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

    /* Switch a task that raised a system-call SVC onto its dedicated system
     * call stack, raise its privilege for the duration of the call and
     * arrange for vRequestSystemCallExit() to run when the call returns. */
    void vSystemCallEnter( uint32_t * pulTaskStack,
                           uint32_t ulLR,
                           uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
    {
        extern TaskHandle_t pxCurrentTCB;
        extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
        xMPU_SETTINGS * pxMpuSettings;
        uint32_t * pulSystemCallStack;
        uint32_t ulStackFrameSize, ulSystemCallLocation, i;

        #if defined( __ARMCC_VERSION )
            /* Declaration when these variables are defined in code instead of being
             * exported from linker scripts. */
            extern uint32_t * __syscalls_flash_start__;
            extern uint32_t * __syscalls_flash_end__;
        #else
            /* Declaration when these variables are exported from linker scripts. */
            extern uint32_t __syscalls_flash_start__[];
            extern uint32_t __syscalls_flash_end__[];
        #endif /* #if defined( __ARMCC_VERSION ) */

        ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );

        /* Checks:
         * 1. SVC is raised from the system call section (i.e. application is
         *    not raising SVC directly).
         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
         *    it is non-NULL only during the execution of a system call (i.e.
         *    between system call enter and exit).
         * 3. System call is not for a kernel API disabled by the configuration
         *    in FreeRTOSConfig.h.
         * 4. We do not need to check that ucSystemCallNumber is within range
         *    because the assembly SVC handler checks that before calling
         *    this function.
         */
        if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
            ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
            ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
        {
            pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;

            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
            {
                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
                {
                    /* Extended frame i.e. FPU in use. */
                    ulStackFrameSize = 26;
                    __asm volatile
                    (
                        " vpush {s0} \n" /* Trigger lazy stacking. */
                        " vpop {s0} \n"  /* Nullify the effect of the above instruction. */
                        ::: "memory"
                    );
                }
                else
                {
                    /* Standard frame i.e. FPU not in use. */
                    ulStackFrameSize = 8;
                }
            }
            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
            {
                ulStackFrameSize = 8;
            }
            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */

            /* Make space on the system call stack for the stack frame. */
            pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;

            /* Copy the stack frame. */
            for( i = 0; i < ulStackFrameSize; i++ )
            {
                pulSystemCallStack[ i ] = pulTaskStack[ i ];
            }

            /* Store the value of the Link Register before the SVC was raised.
             * It contains the address of the caller of the System Call entry
             * point (i.e. the caller of the MPU_<API>). We need to restore it
             * when we exit from the system call. */
            pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];

            /* Store the value of the PSPLIM register before the SVC was raised.
             * We need to restore it when we exit from the system call. */
            __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );

            /* Use the pulSystemCallStack in thread mode. */
            __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );

            /* Start executing the system call upon returning from this handler. */
            pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];

            /* Raise a request to exit from the system call upon finishing the
             * system call. */
            pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;

            /* Remember the location where we should copy the stack frame when we exit from
             * the system call. */
            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;

            /* Record if the hardware used padding to force the stack pointer
             * to be double word aligned. */
            if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
            {
                pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
            }
            else
            {
                pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
            }

            /* We ensure in pxPortInitialiseStack that the system call stack is
             * double word aligned and therefore, there is no need of padding.
             * Clear the bit[9] of stacked xPSR. */
            pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );

            /* Raise the privilege for the duration of the system call. */
            __asm volatile
            (
                " mrs r0, control \n" /* Obtain current control value. */
                " movs r1, #1 \n"     /* r1 = 1. */
                " bics r0, r1 \n"     /* Clear nPRIV bit. */
                " msr control, r0 \n" /* Write back new control value. */
                ::: "r0", "r1", "memory"
            );
        }
    }

#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /* Raise an SVC to signal the end of a system call. This function's
+     * address is planted in the LR slot of the system call stack frame by
+     * the system call entry code, so it executes (still privileged) when the
+     * MPU_<API> wrapper returns. The resulting SVC is handled by
+     * vSystemCallExit, which restores the task stack and drops the
+     * privilege. Naked: the body must remain the single SVC instruction. */
+    void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+    {
+        __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /* Handler for the portSVC_SYSTEM_CALL_EXIT SVC raised by
+     * vRequestSystemCallExit when an MPU_<API> system call returns. It undoes
+     * the work of the system call entry: the exception stack frame is copied
+     * from the system call stack back onto the task stack, PSP/PSPLIM and the
+     * link register captured at system call entry are restored, and the
+     * privilege that was raised for the duration of the system call is
+     * dropped before returning to thread mode.
+     *
+     * pulSystemCallStack - The process stack (i.e. the system call stack) at
+     *                      the time this SVC was raised.
+     * ulLR               - The EXC_RETURN value of the SVC exception, used to
+     *                      tell a standard from an extended (FPU) frame. */
+    void vSystemCallExit( uint32_t * pulSystemCallStack,
+                          uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+    {
+        extern TaskHandle_t pxCurrentTCB;
+        xMPU_SETTINGS * pxMpuSettings;
+        uint32_t * pulTaskStack;
+        uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of
+             * being exported from linker scripts. */
+            extern uint32_t * __privileged_functions_start__;
+            extern uint32_t * __privileged_functions_end__;
+        #else
+            /* Declaration when these variables are exported from linker
+             * scripts. */
+            extern uint32_t __privileged_functions_start__[];
+            extern uint32_t __privileged_functions_end__[];
+        #endif /* #if defined( __ARMCC_VERSION ) */
+
+        ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+        /* Checks:
+         * 1. SVC is raised from the privileged code (i.e. application is not
+         *    raising SVC directly). This SVC is only raised from
+         *    vRequestSystemCallExit which is in the privileged code section.
+         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+         *    this means that we previously entered a system call and the
+         *    application is not attempting to exit without entering a system
+         *    call.
+         */
+        if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+            ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+        {
+            pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+            {
+                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+                {
+                    /* Extended frame i.e. FPU in use. */
+                    ulStackFrameSize = 26;
+                    __asm volatile
+                    (
+                        " vpush {s0} \n" /* Trigger lazy stacking. */
+                        " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+                        ::: "memory"
+                    );
+                }
+                else
+                {
+                    /* Standard frame i.e. FPU not in use. */
+                    ulStackFrameSize = 8;
+                }
+            }
+            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+            {
+                ulStackFrameSize = 8;
+            }
+            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+            /* Make space on the task stack for the stack frame. */
+            pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+            /* Copy the stack frame. */
+            for( i = 0; i < ulStackFrameSize; i++ )
+            {
+                pulTaskStack[ i ] = pulSystemCallStack[ i ];
+            }
+
+            /* Use the pulTaskStack in thread mode. */
+            __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+            /* Return to the caller of the System Call entry point (i.e. the
+             * caller of the MPU_<API>). */
+            pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+            /* Ensure that LR has a valid value.*/
+            pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+            /* Restore the PSPLIM register to what it was at the time of
+             * system call entry. */
+            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+            /* If the hardware used padding to force the stack pointer
+             * to be double word aligned, set the stacked xPSR bit[9],
+             * otherwise clear it. */
+            if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+            }
+            else
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+            }
+
+            /* This is not NULL only for the duration of the system call. */
+            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+            /* Drop the privilege before returning to the thread mode. */
+            __asm volatile
+            (
+                " mrs r0, control \n" /* Obtain current control value. */
+                " movs r1, #1 \n" /* r1 = 1. */
+                " orrs r0, r1 \n" /* Set nPRIV bit. */
+                " msr control, r0 \n" /* Write back new control value. */
+                ::: "r0", "r1", "memory"
+            );
+        }
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Report whether the calling task runs privileged. Returns pdTRUE when
+     * the portTASK_IS_PRIVILEGED_FLAG bit is set in the calling task's MPU
+     * settings, pdFALSE otherwise. */
+    BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+    {
+        const xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( NULL ); /* MPU settings of the calling task. */
+        BaseType_t xResult;
+
+        /* The privilege level is recorded in the task flags when the task is
+         * created. */
+        xResult = ( ( pxMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) ? pdTRUE : pdFALSE;
+
+        return xResult;
+    }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Build the initial saved context of a new task so that the first context
+     * restore starts the task executing pxCode with pvParameters in r0. With
+     * the MPU wrappers v2 the whole context lives in xMPUSettings->ulContext
+     * rather than on the task stack. NOTE(review): the entry order below
+     * appears to mirror the order in which the context restore code in
+     * portasm reads the words - confirm before reordering anything here.
+     *
+     * pxTopOfStack   - Highest usable address of the task stack.
+     * pxEndOfStack   - Lowest address of the task stack (used as PSPLIM).
+     * pxCode         - The task entry function.
+     * pvParameters   - Argument passed to the task entry function (in r0).
+     * xRunPrivileged - pdTRUE to start the task privileged.
+     * xMPUSettings   - Per-task MPU settings that hold the context.
+     *
+     * Returns a pointer one past the last context word written. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters,
+                                         BaseType_t xRunPrivileged,
+                                         xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulIndex = 0;
+
+        /* Callee saved registers are pre-loaded with recognisable dummy
+         * values to aid debugging. */
+        xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+        ulIndex++;
+
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+        ulIndex++;
+
+        #if ( configENABLE_TRUSTZONE == 1 )
+        {
+            xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+            ulIndex++;
+        }
+        #endif /* configENABLE_TRUSTZONE */
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+        ulIndex++;
+        if( xRunPrivileged == pdTRUE )
+        {
+            /* Record the privilege level in the task flags and seed CONTROL
+             * accordingly. */
+            xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        else
+        {
+            xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+        ulIndex++;
+
+        #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+        {
+            /* Ensure that the system call stack is double word aligned. */
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+                                                                                    ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            /* Round the stack limit up so the aligned limit stays inside the
+             * buffer. */
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+                                                                                           ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+                                                                                         ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            /* This is not NULL only for the duration of a system call. */
+            xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+        }
+        #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+        return &( xMPUSettings->ulContext[ ulIndex ] );
+    }
+
+#else /* configENABLE_MPU */
+
+    /* Build the initial stack frame of a new task so that the first context
+     * restore starts the task executing pxCode with pvParameters in r0. The
+     * frame mimics the one created by a context switch interrupt; the slot
+     * order must not be changed.
+     *
+     * pxTopOfStack - Highest usable address of the task stack.
+     * pxEndOfStack - Lowest address of the task stack (stored as PSPLIM).
+     * pxCode       - The task entry function.
+     * pvParameters - Argument passed to the task entry function (in R0).
+     *
+     * Returns the new top of stack after the frame has been written. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Simulate the stack frame as it would be created by a context switch
+         * interrupt. */
+        #if ( portPRELOAD_REGISTERS == 0 )
+        {
+            pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack -= 5; /* Skip R12, R3, R2 and R1; pxTopOfStack now points at the R0 slot. */
+            *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+            pxTopOfStack -= 9; /* Skip R11..R4; pxTopOfStack now points at the EXC_RETURN slot. */
+            *pxTopOfStack = portINITIAL_EXC_RETURN;
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #else /* portPRELOAD_REGISTERS */
+        {
+            /* This variant pre-loads every register with a recognisable dummy
+             * value to aid debugging. */
+            pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+            pxTopOfStack--;
+            *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #endif /* portPRELOAD_REGISTERS */
+
+        return pxTopOfStack;
+    }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Validate the interrupt priority configuration, set the kernel interrupt
+ * priorities, set up the MPU (when enabled) and the tick timer, and start the
+ * first task. Does not return on success; returns 0 only if the first task
+ * ever returns control here, which should not happen. */
+BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+    {
+        volatile uint32_t ulOriginalPriority;
+        volatile uint32_t ulImplementedPrioBits = 0;
+        volatile uint8_t ucMaxPriorityValue;
+
+        /* Determine the maximum priority from which ISR safe FreeRTOS API
+         * functions can be called. ISR safe functions are those that end in
+         * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+         * ensure interrupt entry is as fast and simple as possible.
+         *
+         * Save the interrupt priority value that is about to be clobbered. */
+        ulOriginalPriority = portNVIC_SHPR2_REG;
+
+        /* Determine the number of priority bits available. First write to all
+         * possible bits. */
+        portNVIC_SHPR2_REG = 0xFF000000;
+
+        /* Read the value back to see how many bits stuck. */
+        ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+        /* Use the same mask on the maximum system call priority. */
+        ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+        /* Check that the maximum system call priority is nonzero after
+         * accounting for the number of priority bits supported by the
+         * hardware. A priority of 0 is invalid because setting the BASEPRI
+         * register to 0 unmasks all interrupts, and interrupts with priority 0
+         * cannot be masked using BASEPRI.
+         * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+        configASSERT( ucMaxSysCallPriority );
+
+        /* Check that the bits not implemented in hardware are zero in
+         * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+        configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+        /* Calculate the maximum acceptable priority group value for the number
+         * of bits read back. Each set bit that "stuck" above is one
+         * implemented priority bit. */
+
+        while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+        {
+            ulImplementedPrioBits++;
+            ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+        }
+
+        if( ulImplementedPrioBits == 8 )
+        {
+            /* When the hardware implements 8 priority bits, there is no way for
+             * the software to configure PRIGROUP to not have sub-priorities. As
+             * a result, the least significant bit is always used for sub-priority
+             * and there are 128 preemption priorities and 2 sub-priorities.
+             *
+             * This may cause some confusion in some cases - for example, if
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+             * priority interrupts will be masked in Critical Sections as those
+             * are at the same preemption priority. This may appear confusing as
+             * 4 is higher (numerically lower) priority than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+             * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+             * to 4, this confusion does not happen and the behaviour remains the same.
+             *
+             * The following assert ensures that the sub-priority bit in the
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+             * confusion. */
+            configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+            ulMaxPRIGROUPValue = 0;
+        }
+        else
+        {
+            ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+        }
+
+        /* Shift the priority group value back to its position within the AIRCR
+         * register. */
+        ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+        ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+        /* Restore the clobbered interrupt priority register to its original
+         * value. */
+        portNVIC_SHPR2_REG = ulOriginalPriority;
+    }
+    #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+    /* Make PendSV and SysTick the same priority as the kernel. */
+    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
+    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
+
+    #if ( configENABLE_MPU == 1 )
+    {
+        /* Setup the Memory Protection Unit (MPU). */
+        prvSetupMPU();
+    }
+    #endif /* configENABLE_MPU */
+
+    /* Start the timer that generates the tick ISR. Interrupts are disabled
+     * here already. */
+    vPortSetupTimerInterrupt();
+
+    /* Initialize the critical nesting count ready for the first task. */
+    ulCriticalNesting = 0;
+
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+    {
+        /* From this point the ACL checks consult the running task's
+         * permissions rather than granting blanket access. */
+        xSchedulerRunning = pdTRUE;
+    }
+    #endif
+
+    /* Start the first task. */
+    vStartFirstTask();
+
+    /* Should never get here as the tasks will now be executing. Call the task
+     * exit error function to prevent compiler warnings about a static function
+     * not being called in the case that the application writer overrides this
+     * functionality by defining configTASK_RETURN_ADDRESS. Call
+     * vTaskSwitchContext() so link time optimization does not remove the
+     * symbol. */
+    vTaskSwitchContext();
+    prvTaskExitError();
+
+    /* Should not get here. */
+    return 0;
+}
+/*-----------------------------------------------------------*/
+
+/* The scheduler cannot be stopped on this port - there is nothing to return
+ * to. The deliberate false assertion below (ulCriticalNesting is initialised
+ * to 0 by xPortStartScheduler and never reaches 1000) traps any call. */
+void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Not implemented in ports where there is nothing to return to.
+     * Artificially force an assert. */
+    configASSERT( ulCriticalNesting == 1000UL );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Translate the generic memory region descriptions in xRegions into
+     * ARMv8-M MPU register values (RBAR/RLAR pairs plus MAIR0) stored in
+     * xMPUSettings. When called at task creation the stack parameters are
+     * valid and a region covering the task stack is programmed as region 0;
+     * at all other times ulStackDepth is 0 and the stack region is left
+     * unchanged.
+     *
+     * xMPUSettings    - Per-task MPU settings to populate.
+     * xRegions        - User supplied region definitions, or NULL to
+     *                   invalidate all configurable regions.
+     * pxBottomOfStack - Base address of the task's stack.
+     * ulStackDepth    - Stack size in StackType_t words; 0 when the stack
+     *                   region parameters are not valid. */
+    void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
+                                    const struct xMEMORY_REGION * const xRegions,
+                                    StackType_t * pxBottomOfStack,
+                                    uint32_t ulStackDepth )
+    {
+        uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber;
+        int32_t lIndex = 0;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of
+             * being exported from linker scripts. */
+            extern uint32_t * __privileged_sram_start__;
+            extern uint32_t * __privileged_sram_end__;
+        #else
+            /* Declaration when these variables are exported from linker
+             * scripts. */
+            extern uint32_t __privileged_sram_start__[];
+            extern uint32_t __privileged_sram_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+
+        /* Setup MAIR0: attribute index 0 = normal memory, index 1 = device
+         * memory. */
+        xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+        xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+        /* This function is called automatically when the task is created - in
+         * which case the stack region parameters will be valid. At all other
+         * times the stack parameters will not be valid and it is assumed that
+         * the stack region has already been configured. */
+        if( ulStackDepth > 0 )
+        {
+            ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+            ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1;
+
+            /* If the stack is within the privileged SRAM, do not protect it
+             * using a separate MPU region. This is needed because privileged
+             * SRAM is already protected using an MPU region and ARMv8-M does
+             * not allow overlapping MPU regions. */
+            if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) &&
+                ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) )
+            {
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0;
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0;
+            }
+            else
+            {
+                /* Define the region that allows access to the stack. */
+                ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) |
+                                                             ( portMPU_REGION_NON_SHAREABLE ) |
+                                                             ( portMPU_REGION_READ_WRITE ) |
+                                                             ( portMPU_REGION_EXECUTE_NEVER );
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) |
+                                                             ( portMPU_RLAR_ATTR_INDEX0 ) |
+                                                             ( portMPU_RLAR_REGION_ENABLE );
+            }
+        }
+
+        /* User supplied configurable regions. */
+        for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ )
+        {
+            /* If xRegions is NULL i.e. the task has not specified any MPU
+             * region, the else part ensures that all the configurable MPU
+             * regions are invalidated. */
+            if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) )
+            {
+                /* Translate the generic region definition contained in xRegions
+                 * into the ARMv8 specific MPU settings that are then stored in
+                 * xMPUSettings. */
+                ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                /* Start address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) |
+                                                                          ( portMPU_REGION_NON_SHAREABLE );
+
+                /* RO/RW. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY );
+                }
+                else
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE );
+                }
+
+                /* XN. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER );
+                }
+
+                /* End Address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) |
+                                                                          ( portMPU_RLAR_REGION_ENABLE );
+
+                /* Normal memory/ Device memory. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
+                {
+                    /* Attr1 in MAIR0 is configured as device memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1;
+                }
+                else
+                {
+                    /* Attr0 in MAIR0 is configured as normal memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0;
+                }
+            }
+            else
+            {
+                /* Invalidate the region. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL;
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL;
+            }
+
+            lIndex++;
+        }
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Decide whether the calling task may perform ulAccessRequested on the
+     * ulBufferLength bytes starting at pvBuffer. Privileged tasks are always
+     * granted access; an unprivileged task is granted access only when the
+     * whole buffer fits inside one of its enabled MPU regions and that
+     * region's permissions authorize the requested access. */
+    BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+                                                uint32_t ulBufferLength,
+                                                uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+    {
+        BaseType_t xAccessGranted = pdFALSE;
+        const xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+        uint32_t ulRegionIndex, ulStartAddress, ulEndAddress;
+
+        if( ( pxMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+        {
+            /* A privileged task is not subject to the MPU regions. */
+            xAccessGranted = pdTRUE;
+        }
+        else if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+        {
+            /* Buffer end address computation cannot wrap - scan this task's
+             * MPU regions for one that covers the whole buffer. */
+            ulStartAddress = ( uint32_t ) pvBuffer;
+            ulEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+            for( ulRegionIndex = 0; ulRegionIndex < portTOTAL_NUM_REGIONS; ulRegionIndex++ )
+            {
+                /* Only enabled regions can grant access. */
+                if( ( pxMpuSettings->xRegionsSettings[ ulRegionIndex ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+                {
+                    if( portIS_ADDRESS_WITHIN_RANGE( ulStartAddress,
+                                                     portEXTRACT_FIRST_ADDRESS_FROM_RBAR( pxMpuSettings->xRegionsSettings[ ulRegionIndex ].ulRBAR ),
+                                                     portEXTRACT_LAST_ADDRESS_FROM_RLAR( pxMpuSettings->xRegionsSettings[ ulRegionIndex ].ulRLAR ) ) &&
+                        portIS_ADDRESS_WITHIN_RANGE( ulEndAddress,
+                                                     portEXTRACT_FIRST_ADDRESS_FROM_RBAR( pxMpuSettings->xRegionsSettings[ ulRegionIndex ].ulRBAR ),
+                                                     portEXTRACT_LAST_ADDRESS_FROM_RLAR( pxMpuSettings->xRegionsSettings[ ulRegionIndex ].ulRLAR ) ) &&
+                        portIS_AUTHORIZED( ulAccessRequested,
+                                           prvGetRegionAccessPermissions( pxMpuSettings->xRegionsSettings[ ulRegionIndex ].ulRBAR ) ) )
+                    {
+                        xAccessGranted = pdTRUE;
+                        break;
+                    }
+                }
+            }
+        }
+
+        return xAccessGranted;
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Return pdTRUE when called from an exception/interrupt handler and pdFALSE
+ * when called from thread mode. */
+BaseType_t xPortIsInsideInterrupt( void )
+{
+    uint32_t ulIPSRValue;
+
+    /* The Interrupt Program Status Register (IPSR) holds the exception number
+     * of the currently executing exception, or zero in thread mode. */
+    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSRValue )::"memory" );
+
+    return ( ulIPSRValue == 0 ) ? pdFALSE : pdTRUE;
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+    /* Called from the FromISR API functions (via portASSERT_IF_INTERRUPT_PRIORITY_INVALID)
+     * to assert that the currently executing interrupt has a priority at or
+     * below configMAX_SYSCALL_INTERRUPT_PRIORITY and that all priority bits
+     * are configured as pre-emption priority bits. */
+    void vPortValidateInterruptPriority( void )
+    {
+        uint32_t ulCurrentInterrupt;
+        uint8_t ucCurrentPriority;
+
+        /* Obtain the number of the currently executing interrupt. */
+        __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+        /* Is the interrupt number a user defined interrupt? */
+        if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+        {
+            /* Look up the interrupt's priority. */
+            ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+            /* The following assertion will fail if a service routine (ISR) for
+             * an interrupt that has been assigned a priority above
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+             * function. ISR safe FreeRTOS API functions must *only* be called
+             * from interrupts that have been assigned a priority at or below
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Numerically low interrupt priority numbers represent logically high
+             * interrupt priorities, therefore the priority of the interrupt must
+             * be set to a value equal to or numerically *higher* than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Interrupts that use the FreeRTOS API must not be left at their
+             * default priority of zero as that is the highest possible priority,
+             * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+             * and therefore also guaranteed to be invalid.
+             *
+             * FreeRTOS maintains separate thread and ISR API functions to ensure
+             * interrupt entry is as fast and simple as possible.
+             *
+             * The following links provide detailed information:
+             * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+             * https://www.FreeRTOS.org/FAQHelp.html */
+            configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+        }
+
+        /* Priority grouping: The interrupt controller (NVIC) allows the bits
+         * that define each interrupt's priority to be split between bits that
+         * define the interrupt's pre-emption priority bits and bits that define
+         * the interrupt's sub-priority. For simplicity all bits must be defined
+         * to be pre-emption priority bits. The following assertion will fail if
+         * this is not the case (if some bits represent a sub-priority).
+         *
+         * If the application only uses CMSIS libraries for interrupt
+         * configuration then the correct setting can be achieved on all Cortex-M
+         * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+         * scheduler. Note however that some vendor specific peripheral libraries
+         * assume a non-zero priority group setting, in which cases using a value
+         * of zero will result in unpredictable behaviour. */
+        configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+    }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    /* Set, in the target task's access control list, the bit corresponding to
+     * the given kernel object, thereby granting the task access to it. */
+    void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                         int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+        /* The ACL is a bitmap: locate the word and the bit within it. */
+        const uint32_t ulEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+        const uint32_t ulEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+        pxMpuSettings->ulAccessControlList[ ulEntryIndex ] |= ( 1U << ulEntryBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    /* Clear, in the target task's access control list, the bit corresponding
+     * to the given kernel object, thereby revoking the task's access to it. */
+    void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                          int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+        /* The ACL is a bitmap: locate the word and the bit within it. */
+        const uint32_t ulEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+        const uint32_t ulEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+        pxMpuSettings->ulAccessControlList[ ulEntryIndex ] &= ~( 1U << ulEntryBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+        /* Decide whether the calling task may access the kernel object at the
+         * given internal index. Access is granted before the scheduler starts,
+         * to privileged tasks, and to tasks whose access control list has the
+         * object's bit set. */
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xAccessGranted;
+            const xMPU_SETTINGS * pxMpuSettings;
+            uint32_t ulEntryIndex, ulEntryBit;
+
+            if( xSchedulerRunning == pdFALSE )
+            {
+                /* Before the scheduler starts there is no running task whose
+                 * permissions could be consulted, so grant access to all the
+                 * kernel objects. */
+                xAccessGranted = pdTRUE;
+            }
+            else
+            {
+                pxMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+                if( ( pxMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+                {
+                    /* Privileged tasks may access every kernel object. */
+                    xAccessGranted = pdTRUE;
+                }
+                else
+                {
+                    /* The ACL is a bitmap: locate the word and the bit within
+                     * it, then test the bit. */
+                    ulEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+                    ulEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+                    xAccessGranted = ( ( pxMpuSettings->ulAccessControlList[ ulEntryIndex ] & ( 1U << ulEntryBit ) ) != 0 ) ? pdTRUE : pdFALSE;
+                }
+            }
+
+            return xAccessGranted;
+        }
+
+    #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+        /* Without the Access Control List feature every task has access to
+         * every kernel object. */
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            ( void ) lInternalIndexOfKernelObject;
+
+            return pdTRUE;
+        }
+
+    #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c b/Source/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c
new file mode 100644
index 0000000..b3f6a0a
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c
@@ -0,0 +1,499 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION
+ * is defined correctly and privileged functions are placed in correct sections. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Portasm includes. */
+#include "portasm.h"
+
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
+
+/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
+ * header files. */
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+#if ( configENABLE_MPU == 1 )
+
+ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+ }
+
+#else /* configENABLE_MPU */
+
+ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ " \n"
+ " ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ " msr psplim, r1 \n" /* Set this task's PSPLIM value. */
+ " movs r1, #2 \n" /* r1 = 2. */
+ " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n" /* Discard everything up to r0. */
+ " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx r2 \n" /* Finally, branch to EXC_RETURN. */
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ );
+ }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+ " ite ne \n"
+ " movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ " moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ " bx lr \n" /* Return. */
+ " \n"
+ " .align 4 \n"
+ ::: "r0", "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* Read the CONTROL register. */
+ " bic r0, #1 \n" /* Clear the bit 0. */
+ " msr control, r0 \n" /* Write back the new CONTROL value. */
+ " bx lr \n" /* Return to the caller. */
+ ::: "r0", "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void vResetPrivilege( void ) /* __attribute__ (( naked )) */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " orr r0, #1 \n" /* r0 = r0 | 1. */
+ " msr control, r0 \n" /* CONTROL = r0. */
+ " bx lr \n" /* Return to the caller. */
+ ::: "r0", "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
+ " ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
+ " msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
+ " cpsie i \n" /* Globally enable interrupts. */
+ " cpsie f \n"
+ " dsb \n"
+ " isb \n"
+ " svc %0 \n" /* System call to start the first task. */
+ " nop \n"
+ " \n"
+ " .align 4 \n"
+ "xVTORConst: .word 0xe000ed08 \n"
+ ::"i" ( portSVC_START_SCHEDULER ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
+ " mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " msr basepri, r1 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bx lr \n" /* Return. */
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " msr basepri, r0 \n" /* basepri = ulMask. */
+ " dsb \n"
+ " isb \n"
+ " bx lr \n" /* Return. */
+ ::: "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " \n"
+ " save_general_regs: \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " stmia r1!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psplim \n" /* r3 = PSPLIM. */
+ " mrs r4, control \n" /* r4 = CONTROL. */
+ " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " restore_context_done: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+ }
+
+#else /* configENABLE_MPU */
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, psp \n" /* Read PSP in r0. */
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " it eq \n"
+ " vstmdbeq r0!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " mrs r2, psplim \n" /* r2 = PSPLIM. */
+ " mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
+ " stmdb r0!, {r2-r11} \n" /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r2] \n" /* Read pxCurrentTCB. */
+ " str r0, [r1] \n" /* Save the new top of stack in TCB. */
+ " \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
+ " \n"
+ " ldmia r0!, {r2-r11} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst r3, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " it eq \n"
+ " vldmiaeq r0!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
+ " msr psp, r0 \n" /* Remember the new top of stack for the task. */
+ " bx r3 \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+ }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "blt syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ : "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+ }
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " tst lr, #4 \n"
+ " ite eq \n"
+ " mrseq r0, msp \n"
+ " mrsne r0, psp \n"
+ " ldr r1, svchandler_address_const \n"
+ " bx r1 \n"
+ " \n"
+ " .align 4 \n"
+ "svchandler_address_const: .word vPortSVCHandler_C \n"
+ );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.h b/Source/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.h
new file mode 100644
index 0000000..f64ceb5
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.h
@@ -0,0 +1,114 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __PORT_ASM_H__
+#define __PORT_ASM_H__
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+
+/* MPU wrappers includes. */
+#include "mpu_wrappers.h"
+
+/**
+ * @brief Restore the context of the first task so that the first task starts
+ * executing.
+ */
+void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Raises the privilege level by clearing the bit 0 of the CONTROL
+ * register.
+ *
+ * @note This is a privileged function and should only be called from the kernel
+ * code.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vResetPrivilege( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Starts the first task.
+ */
+void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Disables interrupts.
+ */
+uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enables interrupts.
+ */
+void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief PendSV Exception handler.
+ */
+void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SVC Handler.
+ */
+void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Allocate a Secure context for the calling task.
+ *
+ * @param[in] ulSecureStackSize The size of the stack to be allocated on the
+ * secure side for the calling task.
+ */
+void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) );
+
+/**
+ * @brief Free the task's secure context.
+ *
+ * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task.
+ */
+void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* __PORT_ASM_H__ */
diff --git a/Source/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h b/Source/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h
new file mode 100644
index 0000000..880205c
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h
@@ -0,0 +1,78 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_MVE
+ #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE.
+#endif /* configENABLE_MVE */
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portARCH_NAME "Cortex-M55"
+#define portHAS_BASEPRI 1
+#define portDONT_DISCARD __attribute__( ( used ) )
+/*-----------------------------------------------------------*/
+
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portDISABLE_INTERRUPTS() ulSetInterruptMask()
+#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 )
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/Source/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h
new file mode 100644
index 0000000..6f666da
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h
@@ -0,0 +1,449 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
+
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
+
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
+
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Type definitions.
+ */
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
+
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
+
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+
+/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
+ * not need to be guarded with a critical section. */
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Extern declarations.
+ */
+extern BaseType_t xPortIsInsideInterrupt( void );
+
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
+
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief MPU specific constants.
+ */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
+
+/* MPU settings that can be overriden in FreeRTOSConfig.h. */
+#ifndef configTOTAL_MPU_REGIONS
+ /* Define to 8 for backward compatibility. */
+ #define configTOTAL_MPU_REGIONS ( 8UL )
+#endif
+
+/* MPU regions. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+
+/* Device memory attributes used in MPU_MAIR registers.
+ *
+ * 8-bit values encoded as follows:
+ * Bit[7:4] - 0000 - Device Memory
+ * Bit[3:2] - 00 --> Device-nGnRnE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
+ * Bit[1:0] - 00, Reserved.
+ */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+
+/* Normal memory attributes used in MPU_MAIR registers. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+
+/* Attributes used in MPU_RBAR registers. */
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
+ * @brief SVC numbers.
+ */
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Scheduler utilities.
+ */
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Tickless idle/low power functionality.
+ */
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Task function macros as described on the FreeRTOS.org WEB site.
+ */
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Allocate a secure context for the task.
+ *
+ * Tasks are not created with a secure context. Any task that is going to call
+ * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
+ * secure context before it calls any secure function.
+ *
+ * @param[in] ulSecureStackSize The size of the secure stack to be allocated.
+ */
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+
+/**
+ * @brief Called when a task is deleted to delete the task's secure context,
+ * if it has one.
+ *
+ * @param[in] pxTCB The TCB of the task being deleted.
+ */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+ #define portIS_PRIVILEGED() xIsPrivileged()
+
+/**
+ * @brief Raise an SVC request to raise privilege.
+ *
+ * The SVC handler checks that the SVC was raised from a system call and only
+ * then it raises the privilege. If this is called from any other place,
+ * the privilege is not raised.
+ */
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Barriers.
+ */
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/GCC/ARM_CM7/ReadMe.txt b/Source/portable/GCC/ARM_CM7/ReadMe.txt
index 4cf25c5..90be0b2 100644
--- a/Source/portable/GCC/ARM_CM7/ReadMe.txt
+++ b/Source/portable/GCC/ARM_CM7/ReadMe.txt
@@ -1,8 +1,8 @@
There are two options for running FreeRTOS on ARM Cortex-M7 microcontrollers.
The best option depends on the revision of the ARM Cortex-M7 core in use. The
revision is specified by an 'r' number, and a 'p' number, so will look something
-like 'r0p1'. Check the documentation for the microcontroller in use to find the
-revision of the Cortex-M7 core used in that microcontroller. If in doubt, use
+like 'r0p1'. Check the documentation for the microcontroller in use to find the
+revision of the Cortex-M7 core used in that microcontroller. If in doubt, use
the FreeRTOS port provided specifically for r0p1 revisions, as that can be used
with all core revisions.
@@ -10,9 +10,9 @@
use the Cortex-M7 r0p1 port - the latter containing a minor errata workaround.
If the revision of the ARM Cortex-M7 core is not r0p1 then either option can be
-used, but it is recommended to use the FreeRTOS ARM Cortex-M4F port located in
+used, but it is recommended to use the FreeRTOS ARM Cortex-M4F port located in
the /FreeRTOS/Source/portable/GCC/ARM_CM4F directory.
If the revision of the ARM Cortex-M7 core is r0p1 then use the FreeRTOS ARM
Cortex-M7 r0p1 port located in the /FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1
-directory.
\ No newline at end of file
+directory.
diff --git a/Source/portable/GCC/ARM_CM7/r0p1/port.c b/Source/portable/GCC/ARM_CM7/r0p1/port.c
index bbba49e..afd4baa 100644
--- a/Source/portable/GCC/ARM_CM7/r0p1/port.c
+++ b/Source/portable/GCC/ARM_CM7/r0p1/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -52,8 +52,9 @@
#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
/* Constants required to check the validity of an interrupt priority. */
#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
@@ -244,18 +245,18 @@
void vPortSVCHandler( void )
{
__asm volatile (
- " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */
- " ldr r1, [r3] \n"/* Use pxCurrentTCBConst to get the pxCurrentTCB address. */
- " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */
- " ldmia r0!, {r4-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */
- " msr psp, r0 \n"/* Restore the task stack pointer. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " bx r14 \n"
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Restore the context. */
+ " ldr r1, [r3] \n" /* Use pxCurrentTCBConst to get the pxCurrentTCB address. */
+ " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */
+ " ldmia r0!, {r4-r11, r14} \n" /* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */
+ " msr psp, r0 \n" /* Restore the task stack pointer. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
+ " bx r14 \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
/*-----------------------------------------------------------*/
@@ -267,19 +268,19 @@
* would otherwise result in the unnecessary leaving of space in the SVC stack
* for lazy saving of FPU registers. */
__asm volatile (
- " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"
- " ldr r0, [r0] \n"
- " msr msp, r0 \n"/* Set the msp back to the start of the stack. */
- " mov r0, #0 \n"/* Clear the bit that indicates the FPU is in use, see comment above. */
- " msr control, r0 \n"
- " cpsie i \n"/* Globally enable interrupts. */
- " cpsie f \n"
- " dsb \n"
- " isb \n"
- " svc 0 \n"/* System call to start first task. */
- " nop \n"
- " .ltorg \n"
+ " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n"
+ " ldr r0, [r0] \n"
+ " msr msp, r0 \n" /* Set the msp back to the start of the stack. */
+ " mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */
+ " msr control, r0 \n"
+ " cpsie i \n" /* Globally enable interrupts. */
+ " cpsie f \n"
+ " dsb \n"
+ " isb \n"
+ " svc 0 \n" /* System call to start first task. */
+ " nop \n"
+ " .ltorg \n"
);
}
/*-----------------------------------------------------------*/
@@ -289,13 +290,10 @@
*/
BaseType_t xPortStartScheduler( void )
{
- /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0.
- * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
- configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY );
-
#if ( configASSERT_DEFINED == 1 )
{
- volatile uint32_t ulOriginalPriority;
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
volatile uint8_t ucMaxPriorityValue;
@@ -305,7 +303,7 @@
* ensure interrupt entry is as fast and simple as possible.
*
* Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
/* Determine the number of priority bits available. First write to all
* possible bits. */
@@ -317,33 +315,53 @@
/* Use the same mask on the maximum system call priority. */
ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
/* Calculate the maximum acceptable priority group value for the number
* of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- ulMaxPRIGROUPValue--;
+ ulImplementedPrioBits++;
ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
- #ifdef __NVIC_PRIO_BITS
+ if( ulImplementedPrioBits == 8 )
{
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
}
- #endif
-
- #ifdef configPRIO_BITS
+ else
{
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
}
- #endif
/* Shift the priority group value back to its position within the AIRCR
* register. */
@@ -352,7 +370,7 @@
/* Restore the clobbered interrupt priority register to its original
* value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
}
#endif /* configASSERT_DEFINED */
@@ -433,54 +451,54 @@
__asm volatile
(
- " mrs r0, psp \n"
- " isb \n"
- " \n"
- " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */
- " ldr r2, [r3] \n"
- " \n"
- " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, push high vfp registers. */
- " it eq \n"
- " vstmdbeq r0!, {s16-s31} \n"
- " \n"
- " stmdb r0!, {r4-r11, r14} \n"/* Save the core registers. */
- " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */
- " \n"
- " stmdb sp!, {r0, r3} \n"
- " mov r0, %0 \n"
- " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- " msr basepri, r0 \n"
- " dsb \n"
- " isb \n"
- " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- " bl vTaskSwitchContext \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " ldmia sp!, {r0, r3} \n"
- " \n"
- " ldr r1, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */
- " ldr r0, [r1] \n"
- " \n"
- " ldmia r0!, {r4-r11, r14} \n"/* Pop the core registers. */
- " \n"
- " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, pop the high vfp registers too. */
- " it eq \n"
- " vldmiaeq r0!, {s16-s31} \n"
- " \n"
- " msr psp, r0 \n"
- " isb \n"
- " \n"
+ " mrs r0, psp \n"
+ " isb \n"
+ " \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Get the location of the current TCB. */
+ " ldr r2, [r3] \n"
+ " \n"
+ " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, push high vfp registers. */
+ " it eq \n"
+ " vstmdbeq r0!, {s16-s31} \n"
+ " \n"
+ " stmdb r0!, {r4-r11, r14} \n" /* Save the core registers. */
+ " str r0, [r2] \n" /* Save the new top of stack into the first member of the TCB. */
+ " \n"
+ " stmdb sp!, {r0, r3} \n"
+ " mov r0, %0 \n"
+ " cpsid i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+ " msr basepri, r0 \n"
+ " dsb \n"
+ " isb \n"
+ " cpsie i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
+ " ldmia sp!, {r0, r3} \n"
+ " \n"
+ " ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */
+ " ldr r0, [r1] \n"
+ " \n"
+ " ldmia r0!, {r4-r11, r14} \n" /* Pop the core registers. */
+ " \n"
+ " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, pop the high vfp registers too. */
+ " it eq \n"
+ " vldmiaeq r0!, {s16-s31} \n"
+ " \n"
+ " msr psp, r0 \n"
+ " isb \n"
+ " \n"
#ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata workaround. */
#if WORKAROUND_PMU_CM001 == 1
- " push { r14 } \n"
- " pop { pc } \n"
+ " push { r14 } \n"
+ " pop { pc } \n"
#endif
#endif
- " \n"
- " bx r14 \n"
- " \n"
- " .align 4 \n"
- "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " \n"
+ " bx r14 \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
@@ -757,13 +775,13 @@
{
__asm volatile
(
- " ldr.w r0, =0xE000ED88 \n"/* The FPU enable bits are in the CPACR. */
- " ldr r1, [r0] \n"
- " \n"
- " orr r1, r1, #( 0xf << 20 ) \n"/* Enable CP10 and CP11 coprocessors, then save back. */
- " str r1, [r0] \n"
- " bx r14 \n"
- " .ltorg \n"
+ " ldr.w r0, =0xE000ED88 \n" /* The FPU enable bits are in the CPACR. */
+ " ldr r1, [r0] \n"
+ " \n"
+ " orr r1, r1, #( 0xf << 20 ) \n" /* Enable CP10 and CP11 coprocessors, then save back. */
+ " str r1, [r0] \n"
+ " bx r14 \n"
+ " .ltorg \n"
);
}
/*-----------------------------------------------------------*/
@@ -796,10 +814,10 @@
* be set to a value equal to or numerically *higher* than
* configMAX_SYSCALL_INTERRUPT_PRIORITY.
*
- * Interrupts that use the FreeRTOS API must not be left at their
- * default priority of zero as that is the highest possible priority,
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
* which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
- * and therefore also guaranteed to be invalid.
+ * and therefore also guaranteed to be invalid.
*
* FreeRTOS maintains separate thread and ISR API functions to ensure
* interrupt entry is as fast and simple as possible.
diff --git a/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h b/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h
index c66c6d0..8f08bd0 100644
--- a/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h
+++ b/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -30,9 +30,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*-----------------------------------------------------------
* Port specific definitions.
@@ -57,16 +59,18 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
+ #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
- #else
+ #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+ #else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
@@ -197,12 +201,12 @@
__asm volatile
(
- " mov %0, %1 \n"\
- " cpsid i \n"\
- " msr basepri, %0 \n"\
- " isb \n"\
- " dsb \n"\
- " cpsie i \n"\
+ " mov %0, %1 \n"\
+ " cpsid i \n"\
+ " msr basepri, %0 \n"\
+ " isb \n"\
+ " dsb \n"\
+ " cpsie i \n"\
: "=r" ( ulNewBASEPRI ) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@@ -215,13 +219,13 @@
__asm volatile
(
- " mrs %0, basepri \n"\
- " mov %1, %2 \n"\
- " cpsid i \n"\
- " msr basepri, %1 \n"\
- " isb \n"\
- " dsb \n"\
- " cpsie i \n"\
+ " mrs %0, basepri \n"\
+ " mov %1, %2 \n"\
+ " cpsid i \n"\
+ " msr basepri, %1 \n"\
+ " isb \n"\
+ " dsb \n"\
+ " cpsie i \n"\
: "=r" ( ulOriginalBASEPRI ), "=r" ( ulNewBASEPRI ) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
@@ -235,15 +239,17 @@
{
__asm volatile
(
- " msr basepri, %0 "::"r" ( ulNewMaskValue ) : "memory"
+ " msr basepri, %0 "::"r" ( ulNewMaskValue ) : "memory"
);
}
/*-----------------------------------------------------------*/
#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c b/Source/portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..d247c92
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2106 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+#include "mpu_syscall_numbers.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskDelayUntil ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskAbortDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskPriorityGet ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_eTaskGetState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskGetInfo ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetIdleTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSuspend ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskResume ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ TickType_t MPU_xTaskGetTickCount( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetTickCount ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetNumberOfTasks ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTaskGetName ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimeCounter ) : "memory"
+ );
+ }
+
    +    #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimePercent ) : "memory"
+ );
+ }
+
    +    #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetSystemState ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark2 ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetCurrentTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetSchedulerState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetTimeOutState ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskCheckForTimeOut ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotify ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyWait ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyTake ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyStateClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyValueClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGenericSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueMessagesWaiting ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueuePeek ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSemaphoreTake ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGetMutexHolder ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueTakeMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGiveMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSelectFromSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueAddToSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueAddToRegistry ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueUnregisterQueue ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcQueueGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTimerGetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerIsTimerActive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGenericCommand ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTimerGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetPeriod ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetExpiryTime ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupWaitBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupClearBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSetBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSync ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxEventGroupGetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vEventGroupSetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsFull ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsEmpty ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferBytesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSetTriggerLevel ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferNextMessageLengthBytes ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/Source/portable/GCC/ARM_CM85/non_secure/port.c b/Source/portable/GCC/ARM_CM85/non_secure/port.c
new file mode 100644
index 0000000..9712ac3
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/non_secure/port.c
@@ -0,0 +1,2043 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+/* MPU includes. */
+#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
+
+/* Portasm includes. */
+#include "portasm.h"
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ /* Secure components includes. */
+ #include "secure_context.h"
+ #include "secure_init.h"
+#endif /* configENABLE_TRUSTZONE */
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/**
+ * The FreeRTOS Cortex M85 port can be configured to run on the Secure Side only
+ * i.e. the processor boots as secure and never jumps to the non-secure side.
+ * The Trust Zone support in the port must be disabled in order to run FreeRTOS
+ * on the secure side. The following are the valid configuration settings:
+ *
+ * 1. Run FreeRTOS on the Secure Side:
+ * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
+ *
+ * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
+ *
+ * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
+ */
+#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
+ #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the NVIC.
+ */
+#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
+#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
+#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
+#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
+#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
+#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
+#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
+#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
+#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
+#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
+#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 )
+#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
+#define portCPACR_CP10_VALUE ( 3UL )
+#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE
+#define portCPACR_CP10_POS ( 20UL )
+#define portCPACR_CP11_POS ( 22UL )
+
+#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define portFPCCR_ASPEN_POS ( 31UL )
+#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS )
+#define portFPCCR_LSPEN_POS ( 30UL )
+#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the MPU.
+ */
+#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
+#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
+#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
+
+#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
+#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
+
+#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) )
+#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) )
+
+#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) )
+#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) )
+
+#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) )
+#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) )
+
+#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) )
+#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) )
+
+#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
+#define portMPU_MAIR_ATTR0_POS ( 0UL )
+#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR1_POS ( 8UL )
+#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR2_POS ( 16UL )
+#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR3_POS ( 24UL )
+#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 )
+
+#define portMPU_MAIR_ATTR4_POS ( 0UL )
+#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR5_POS ( 8UL )
+#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR6_POS ( 16UL )
+#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR7_POS ( 24UL )
+#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 )
+
+#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL )
+
+#define portMPU_RLAR_REGION_ENABLE ( 1UL )
+
+/* Enable privileged access to unmapped region. */
+#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL )
+
+/* Enable MPU. */
+#define portMPU_ENABLE_BIT ( 1UL << 0UL )
+
+/* Expected value of the portMPU_TYPE register. */
+#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? Note that
+ * both macro arguments are fully parenthesized to avoid precedence surprises
+ * when callers pass compound expressions (e.g. `a | b`). */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+    ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The maximum 24-bit number.
+ *
+ * It is needed because the systick is a 24-bit counter.
+ */
+#define portMAX_24_BIT_NUMBER ( 0xffffffUL )
+
+/**
+ * @brief A fiddle factor to estimate the number of SysTick counts that would
+ * have occurred while the SysTick counter is stopped during tickless idle
+ * calculations.
+ */
+#define portMISSED_COUNTS_FACTOR ( 94UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to set up the initial stack.
+ */
+#define portINITIAL_XPSR ( 0x01000000 )
+
+#if ( configRUN_FREERTOS_SECURE_ONLY == 1 )
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF FD
+ * 1111 1111 1111 1111 1111 1111 1111 1101
+ *
+ * Bit[6] - 1 --> The exception was taken from the Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 1 --> The exception was taken to the Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xfffffffd )
+#else
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF BC
+ * 1111 1111 1111 1111 1111 1111 1011 1100
+ *
+ * Bit[6] - 0 --> The exception was taken from the Non-Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 0 --> The exception was taken to the Non-Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xffffffbc )
+#endif /* configRUN_FREERTOS_SECURE_ONLY */
+
+/**
+ * @brief CONTROL register privileged bit mask.
+ *
+ * Bit[0] in CONTROL register tells the privilege:
+ * Bit[0] = 0 ==> The task is privileged.
+ * Bit[0] = 1 ==> The task is not privileged.
+ */
+#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL )
+
+/**
+ * @brief Initial CONTROL register values.
+ */
+#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 )
+#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 )
+
+/**
+ * @brief Let the user override the default SysTick clock rate. If defined by the
+ * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
+ * configuration register.
+ */
+#ifndef configSYSTICK_CLOCK_HZ
+ #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ )
+ /* Ensure the SysTick is clocked at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT )
+#else
+ /* Select the option to clock SysTick not at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 )
+#endif
+
+/**
+ * @brief Let the user override the pre-loading of the initial LR with the
+ * address of prvTaskExitError() in case it messes up unwinding of the stack
+ * in the debugger.
+ */
+#ifdef configTASK_RETURN_ADDRESS
+ #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
+#else
+ #define portTASK_RETURN_ADDRESS prvTaskExitError
+#endif
+
+/**
+ * @brief If portPRELOAD_REGISTERS then registers will be given an initial value
+ * when a task is created. This helps in debugging at the cost of code size.
+ */
+#define portPRELOAD_REGISTERS 1
+
+/**
+ * @brief A task is created without a secure context, and must call
+ * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes
+ * any secure calls.
+ */
+#define portNO_SECURE_CONTEXT 0
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Used to catch tasks that attempt to return from their implementing
+ * function.
+ */
+static void prvTaskExitError( void );
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Setup the Memory Protection Unit (MPU).
+ */
+ static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_FPU == 1 )
+
+/**
+ * @brief Setup the Floating Point Unit (FPU).
+ */
+ static void prvSetupFPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_FPU */
+
+/**
+ * @brief Setup the timer to generate the tick interrupts.
+ *
+ * The implementation in this file is weak to allow application writers to
+ * change the timer used to generate the tick interrupt.
+ */
+void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether the current execution context is interrupt.
+ *
+ * @return pdTRUE if the current execution context is interrupt, pdFALSE
+ * otherwise.
+ */
+BaseType_t xPortIsInsideInterrupt( void );
+
+/**
+ * @brief Yield the processor.
+ */
+void vPortYield( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enter critical section.
+ */
+void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Exit from critical section.
+ */
+void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SysTick handler.
+ */
+void SysTick_Handler( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief C part of SVC handler.
+ */
+portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
+/**
+ * @brief Each task maintains its own interrupt status in the critical nesting
+ * variable.
+ */
+PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Saved as part of the task context to indicate which context the
+ * task is using on the secure side.
+ */
+ PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
+#endif /* configENABLE_TRUSTZONE */
+
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+
+/**
+ * @brief The number of SysTick increments that make up one tick period.
+ */
+ PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;
+
+/**
+ * @brief The maximum number of tick periods that can be suppressed is
+ * limited by the 24 bit resolution of the SysTick timer.
+ */
+ PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;
+
+/**
+ * @brief Compensate for the CPU cycles that pass while the SysTick is
+ * stopped (low power functionality only).
+ */
+ PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+ __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
+ {
+ uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
+ TickType_t xModifiableIdleTime;
+
+ /* Make sure the SysTick reload value does not overflow the counter. */
+ if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
+ {
+ xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
+ }
+
+ /* Enter a critical section but don't use the taskENTER_CRITICAL()
+ * method as that will mask interrupts that should exit sleep mode. */
+ __asm volatile ( "cpsid i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* If a context switch is pending or a task is waiting for the scheduler
+ * to be unsuspended then abandon the low power entry. */
+ if( eTaskConfirmSleepModeStatus() == eAbortSleep )
+ {
+ /* Re-enable interrupts - see comments above the cpsid instruction
+ * above. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ }
+ else
+ {
+ /* Stop the SysTick momentarily. The time the SysTick is stopped for
+ * is accounted for as best it can be, but using the tickless mode will
+ * inevitably result in some tiny drift of the time maintained by the
+ * kernel with respect to calendar time. */
+ portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+ /* Use the SysTick current-value register to determine the number of
+ * SysTick decrements remaining until the next tick interrupt. If the
+ * current-value register is zero, then there are actually
+ * ulTimerCountsForOneTick decrements remaining, not zero, because the
+ * SysTick requests the interrupt when decrementing from 1 to 0. */
+ ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+
+ if( ulSysTickDecrementsLeft == 0 )
+ {
+ ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
+ }
+
+ /* Calculate the reload value required to wait xExpectedIdleTime
+ * tick periods. -1 is used because this code normally executes part
+ * way through the first tick period. But if the SysTick IRQ is now
+ * pending, then clear the IRQ, suppressing the first tick, and correct
+ * the reload value to reflect that the second tick period is already
+ * underway. The expected idle time is always at least two ticks. */
+ ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
+
+ if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
+ {
+ portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
+ ulReloadValue -= ulTimerCountsForOneTick;
+ }
+
+ if( ulReloadValue > ulStoppedTimerCompensation )
+ {
+ ulReloadValue -= ulStoppedTimerCompensation;
+ }
+
+ /* Set the new reload value. */
+ portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
+
+ /* Clear the SysTick count flag and set the count value back to
+ * zero. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+ /* Restart SysTick. */
+ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
+
+ /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
+ * set its parameter to 0 to indicate that its implementation contains
+ * its own wait for interrupt or wait for event instruction, and so wfi
+ * should not be executed again. However, the original expected idle
+ * time variable must remain unmodified, so a copy is taken. */
+ xModifiableIdleTime = xExpectedIdleTime;
+ configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
+
+ if( xModifiableIdleTime > 0 )
+ {
+ __asm volatile ( "dsb" ::: "memory" );
+ __asm volatile ( "wfi" );
+ __asm volatile ( "isb" );
+ }
+
+ configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
+
+ /* Re-enable interrupts to allow the interrupt that brought the MCU
+ * out of sleep mode to execute immediately. See comments above
+ * the cpsid instruction above. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* Disable interrupts again because the clock is about to be stopped
+ * and interrupts that execute while the clock is stopped will increase
+ * any slippage between the time maintained by the RTOS and calendar
+ * time. */
+ __asm volatile ( "cpsid i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* Disable the SysTick clock without reading the
+ * portNVIC_SYSTICK_CTRL_REG register to ensure the
+ * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
+ * the time the SysTick is stopped for is accounted for as best it can
+ * be, but using the tickless mode will inevitably result in some tiny
+ * drift of the time maintained by the kernel with respect to calendar
+ * time*/
+ portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+ /* Determine whether the SysTick has already counted to zero. */
+ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+ {
+ uint32_t ulCalculatedLoadValue;
+
+ /* The tick interrupt ended the sleep (or is now pending), and
+ * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG
+ * with whatever remains of the new tick period. */
+ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
+
+ /* Don't allow a tiny value, or values that have somehow
+ * underflowed because the post sleep hook did something
+ * that took too long or because the SysTick current-value register
+ * is zero. */
+ if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
+ {
+ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
+ }
+
+ portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
+
+ /* As the pending tick will be processed as soon as this
+ * function exits, the tick value maintained by the tick is stepped
+ * forward by one less than the time spent waiting. */
+ ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
+ }
+ else
+ {
+ /* Something other than the tick interrupt ended the sleep. */
+
+ /* Use the SysTick current-value register to determine the
+ * number of SysTick decrements remaining until the expected idle
+ * time would have ended. */
+ ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+ #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
+ {
+ /* If the SysTick is not using the core clock, the current-
+ * value register might still be zero here. In that case, the
+ * SysTick didn't load from the reload register, and there are
+ * ulReloadValue decrements remaining in the expected idle
+ * time, not zero. */
+ if( ulSysTickDecrementsLeft == 0 )
+ {
+ ulSysTickDecrementsLeft = ulReloadValue;
+ }
+ }
+ #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+ /* Work out how long the sleep lasted rounded to complete tick
+ * periods (not the ulReload value which accounted for part
+ * ticks). */
+ ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
+
+ /* How many complete tick periods passed while the processor
+ * was waiting? */
+ ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
+
+ /* The reload value is set to whatever fraction of a single tick
+ * period remains. */
+ portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
+ }
+
+ /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
+ * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If
+ * the SysTick is not using the core clock, temporarily configure it to
+ * use the core clock. This configuration forces the SysTick to load
+ * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
+ * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready
+ * to receive the standard value immediately. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+ #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
+ {
+ portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+ }
+ #else
+ {
+ /* The temporary usage of the core clock has served its purpose,
+ * as described above. Resume usage of the other clock. */
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;
+
+ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+ {
+ /* The partial tick period already ended. Be sure the SysTick
+ * counts it only once. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
+ }
+
+ portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+ }
+ #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+ /* Step the tick to account for any tick periods that elapsed. */
+ vTaskStepTick( ulCompleteTickPeriods );
+
+ /* Exit with interrupts enabled. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ }
+ }
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
+{
+ /* Calculate the constants required to configure the tick interrupt. */
+ #if ( configUSE_TICKLESS_IDLE == 1 )
+ {
+ ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
+ xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
+ ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
+ }
+ #endif /* configUSE_TICKLESS_IDLE */
+
+ /* Stop and reset the SysTick. */
+ portNVIC_SYSTICK_CTRL_REG = 0UL;
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+ /* Configure SysTick to interrupt at the requested rate. */
+ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+}
+/*-----------------------------------------------------------*/
+
+static void prvTaskExitError( void )
+{
+ volatile uint32_t ulDummy = 0UL;
+
+ /* A function that implements a task must not exit or attempt to return to
+ * its caller as there is nothing to return to. If a task wants to exit it
+ * should instead call vTaskDelete( NULL ). Artificially force an assert()
+ * to be triggered if configASSERT() is defined, then stop here so
+ * application writers can catch the error. */
+ configASSERT( ulCriticalNesting == ~0UL );
+ portDISABLE_INTERRUPTS();
+
+ while( ulDummy == 0 )
+ {
+ /* This file calls prvTaskExitError() after the scheduler has been
+ * started to remove a compiler warning about the function being
+ * defined but never called. ulDummy is used purely to quieten other
+ * warnings about code appearing after this function is called - making
+ * ulDummy volatile makes the compiler think the function could return
+ * and therefore not output an 'unreachable code' warning for code that
+ * appears after it. */
+ }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+    /* Program the fixed kernel MPU regions (privileged flash, unprivileged
+     * flash, system-call flash and privileged SRAM) using the addresses
+     * exported by the linker script, then enable the MPU with privileged
+     * background access. */
+    static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
+    {
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_functions_start__;
+            extern uint32_t * __privileged_functions_end__;
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+            extern uint32_t * __unprivileged_flash_start__;
+            extern uint32_t * __unprivileged_flash_end__;
+            extern uint32_t * __privileged_sram_start__;
+            extern uint32_t * __privileged_sram_end__;
+        #else /* if defined( __ARMCC_VERSION ) */
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __privileged_functions_start__[];
+            extern uint32_t __privileged_functions_end__[];
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+            extern uint32_t __unprivileged_flash_start__[];
+            extern uint32_t __unprivileged_flash_end__[];
+            extern uint32_t __privileged_sram_start__[];
+            extern uint32_t __privileged_sram_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+
+        /* The only permitted number of regions are 8 or 16. */
+        configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) );
+
+        /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */
+        configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );
+
+        /* Check that the MPU is present. */
+        if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
+        {
+            /* MAIR0 - Index 0. */
+            portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+            /* MAIR0 - Index 1. */
+            portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+            /* Setup privileged flash as Read Only so that privileged tasks can
+             * read it but not modify. */
+            portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_PRIVILEGED_READ_ONLY );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Setup unprivileged flash as Read Only by both privileged and
+             * unprivileged tasks. All tasks can read it but no-one can modify. */
+            portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_READ_ONLY );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Setup unprivileged syscalls flash as Read Only by both privileged
+             * and unprivileged tasks. All tasks can read it but no-one can modify. */
+            portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_READ_ONLY );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Setup RAM containing kernel data for privileged access only. */
+            portMPU_RNR_REG = portPRIVILEGED_RAM_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
+                               ( portMPU_REGION_EXECUTE_NEVER );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Enable mem fault. */
+            portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT;
+
+            /* Enable MPU with privileged background access i.e. unmapped
+             * regions have privileged access. */
+            portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
+        }
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_FPU == 1 )
+    /* Grant privileged and unprivileged code full access to the FPU and
+     * enable automatic plus lazy floating point context save/restore. */
+    static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */
+    {
+        #if ( configENABLE_TRUSTZONE == 1 )
+        {
+            /* Enable non-secure access to the FPU. */
+            SecureInit_EnableNSFPUAccess();
+        }
+        #endif /* configENABLE_TRUSTZONE */
+
+        /* CP10 = 11 ==> Full access to FPU i.e. both privileged and
+         * unprivileged code should be able to access FPU. CP11 should be
+         * programmed to the same value as CP10. */
+        *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) |
+                            ( portCPACR_CP11_VALUE << portCPACR_CP11_POS )
+                            );
+
+        /* ASPEN = 1 ==> Hardware should automatically preserve floating point
+         * context on exception entry and restore on exception return.
+         * LSPEN = 1 ==> Enable lazy context save of FP state. */
+        *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK );
+    }
+#endif /* configENABLE_FPU */
+/*-----------------------------------------------------------*/
+
+void vPortYield( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Request a context switch by pending the PendSV exception. */
+    portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+
+    /* Data and instruction synchronisation barriers ensure the pend request
+     * takes effect before any following instruction - not strictly needed,
+     * but keeps the code within architecturally defined behaviour. */
+    __asm volatile ( "dsb \n"
+                     "isb \n" ::: "memory" );
+}
+/*-----------------------------------------------------------*/
+
+void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Mask interrupts first so the nesting count update cannot be
+     * interrupted. */
+    portDISABLE_INTERRUPTS();
+    ++ulCriticalNesting;
+
+    /* Barriers keep the sequence within architecturally defined behaviour;
+     * they are not normally required. */
+    __asm volatile ( "dsb \n"
+                     "isb \n" ::: "memory" );
+}
+/*-----------------------------------------------------------*/
+
+void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Exiting a critical section that was never entered is a bug. */
+    configASSERT( ulCriticalNesting );
+
+    /* Only re-enable interrupts when the outermost nesting level is left. */
+    if( --ulCriticalNesting == 0 )
+    {
+        portENABLE_INTERRUPTS();
+    }
+}
+/*-----------------------------------------------------------*/
+
+void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Mask interrupts while the tick count is stepped; the previous mask is
+     * restored afterwards. */
+    uint32_t ulSavedInterruptMask = portSET_INTERRUPT_MASK_FROM_ISR();
+
+    /* A non-pdFALSE return from xTaskIncrementTick() means a context switch
+     * is required, so pend the PendSV exception. */
+    if( xTaskIncrementTick() != pdFALSE )
+    {
+        portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+    }
+
+    portCLEAR_INTERRUPT_MASK_FROM_ISR( ulSavedInterruptMask );
+}
+/*-----------------------------------------------------------*/
+
+/* C-level part of the SVC handler.  Called from the assembly SVC handler
+ * with a pointer to the exception stack frame of the code that raised the
+ * SVC, and dispatches on the SVC number encoded in the SVC instruction. */
+void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
+{
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+    #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+    uint32_t ulPC;
+
+    #if ( configENABLE_TRUSTZONE == 1 )
+        uint32_t ulR0, ulR1;
+        extern TaskHandle_t pxCurrentTCB;
+        #if ( configENABLE_MPU == 1 )
+            uint32_t ulControl, ulIsTaskPrivileged;
+        #endif /* configENABLE_MPU */
+    #endif /* configENABLE_TRUSTZONE */
+    uint8_t ucSVCNumber;
+
+    /* Register are stored on the stack in the following order - R0, R1, R2, R3,
+     * R12, LR, PC, xPSR. */
+    ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
+    /* The SVC number is the immediate encoded in the low byte of the SVC
+     * instruction, which sits two bytes before the stacked return address. */
+    ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
+
+    switch( ucSVCNumber )
+    {
+        #if ( configENABLE_TRUSTZONE == 1 )
+            case portSVC_ALLOCATE_SECURE_CONTEXT:
+
+                /* R0 contains the stack size passed as parameter to the
+                 * vPortAllocateSecureContext function. */
+                ulR0 = pulCallerStackAddress[ 0 ];
+
+                #if ( configENABLE_MPU == 1 )
+                {
+                    /* Read the CONTROL register value. */
+                    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
+
+                    /* The task that raised the SVC is privileged if Bit[0]
+                     * in the CONTROL register is 0. */
+                    ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 );
+
+                    /* Allocate and load a context for the secure task. */
+                    xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB );
+                }
+                #else /* if ( configENABLE_MPU == 1 ) */
+                {
+                    /* Allocate and load a context for the secure task. */
+                    xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB );
+                }
+                #endif /* configENABLE_MPU */
+
+                configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID );
+                SecureContext_LoadContext( xSecureContext, pxCurrentTCB );
+                break;
+
+            case portSVC_FREE_SECURE_CONTEXT:
+
+                /* R0 contains TCB being freed and R1 contains the secure
+                 * context handle to be freed. */
+                ulR0 = pulCallerStackAddress[ 0 ];
+                ulR1 = pulCallerStackAddress[ 1 ];
+
+                /* Free the secure context. */
+                SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 );
+                break;
+        #endif /* configENABLE_TRUSTZONE */
+
+        case portSVC_START_SCHEDULER:
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                /* De-prioritize the non-secure exceptions so that the
+                 * non-secure pendSV runs at the lowest priority. */
+                SecureInit_DePrioritizeNSExceptions();
+
+                /* Initialize the secure context management system. */
+                SecureContext_Init();
+            }
+            #endif /* configENABLE_TRUSTZONE */
+
+            #if ( configENABLE_FPU == 1 )
+            {
+                /* Setup the Floating Point Unit (FPU). */
+                prvSetupFPU();
+            }
+            #endif /* configENABLE_FPU */
+
+            /* Setup the context of the first task so that the first task starts
+             * executing. */
+            vRestoreContextOfFirstTask();
+            break;
+
+        #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+            case portSVC_RAISE_PRIVILEGE:
+
+                /* Only raise the privilege, if the svc was raised from any of
+                 * the system calls. */
+                if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+                    ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+                {
+                    vRaisePrivilege();
+                }
+                break;
+        #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+        default:
+            /* Incorrect SVC call. */
+            configASSERT( pdFALSE );
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /* Entry half of the MPU v2 system call mechanism.  Copies the exception
+     * stack frame onto the task's dedicated system call stack, switches PSP
+     * and PSPLIM to it, raises privilege, and redirects the stacked PC to
+     * the system call implementation with LR pointing at
+     * vRequestSystemCallExit so the exit SVC is raised on return. */
+    void vSystemCallEnter( uint32_t * pulTaskStack,
+                           uint32_t ulLR,
+                           uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+    {
+        extern TaskHandle_t pxCurrentTCB;
+        extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+        xMPU_SETTINGS * pxMpuSettings;
+        uint32_t * pulSystemCallStack;
+        uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+        #endif /* #if defined( __ARMCC_VERSION ) */
+
+        ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+        /* Checks:
+         * 1. SVC is raised from the system call section (i.e. application is
+         *    not raising SVC directly).
+         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+         *    it is non-NULL only during the execution of a system call (i.e.
+         *    between system call enter and exit).
+         * 3. System call is not for a kernel API disabled by the configuration
+         *    in FreeRTOSConfig.h.
+         * 4. We do not need to check that ucSystemCallNumber is within range
+         *    because the assembly SVC handler checks that before calling
+         *    this function.
+         */
+        if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+            ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+            ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+        {
+            pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+            {
+                /* Bit[4] of EXC_RETURN clear means an extended (FP) frame
+                 * was stacked. */
+                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+                {
+                    /* Extended frame i.e. FPU in use. */
+                    ulStackFrameSize = 26;
+                    __asm volatile
+                    (
+                        " vpush {s0} \n" /* Trigger lazy stacking. */
+                        " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+                        ::: "memory"
+                    );
+                }
+                else
+                {
+                    /* Standard frame i.e. FPU not in use. */
+                    ulStackFrameSize = 8;
+                }
+            }
+            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+            {
+                ulStackFrameSize = 8;
+            }
+            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+            /* Make space on the system call stack for the stack frame. */
+            pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+            /* Copy the stack frame. */
+            for( i = 0; i < ulStackFrameSize; i++ )
+            {
+                pulSystemCallStack[ i ] = pulTaskStack[ i ];
+            }
+
+            /* Store the value of the Link Register before the SVC was raised.
+             * It contains the address of the caller of the System Call entry
+             * point (i.e. the caller of the MPU_<API>). We need to restore it
+             * when we exit from the system call. */
+            pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+            /* Store the value of the PSPLIM register before the SVC was raised.
+             * We need to restore it when we exit from the system call. */
+            __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+            /* Use the pulSystemCallStack in thread mode. */
+            __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+            /* Start executing the system call upon returning from this handler. */
+            pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+            /* Raise a request to exit from the system call upon finishing the
+             * system call. */
+            pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+            /* Remember the location where we should copy the stack frame when we exit from
+             * the system call. */
+            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+            {
+                pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+            }
+            else
+            {
+                pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+            }
+
+            /* We ensure in pxPortInitialiseStack that the system call stack is
+             * double word aligned and therefore, there is no need of padding.
+             * Clear the bit[9] of stacked xPSR. */
+            pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+            /* Raise the privilege for the duration of the system call. */
+            __asm volatile
+            (
+                " mrs r0, control \n" /* Obtain current control value. */
+                " movs r1, #1 \n" /* r1 = 1. */
+                " bics r0, r1 \n" /* Clear nPRIV bit. */
+                " msr control, r0 \n" /* Write back new control value. */
+                ::: "r0", "r1", "memory"
+            );
+        }
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /* Raise the SVC that requests exit from a system call.  vSystemCallEnter
+     * points the stacked LR here, so this runs automatically when a system
+     * call implementation returns. */
+    void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+    {
+        __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /* Exit half of the MPU v2 system call mechanism and counterpart of
+     * vSystemCallEnter.  Copies the stack frame back onto the task stack,
+     * restores PSP/PSPLIM and the saved LR, and drops privilege before
+     * returning to thread mode. */
+    void vSystemCallExit( uint32_t * pulSystemCallStack,
+                          uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+    {
+        extern TaskHandle_t pxCurrentTCB;
+        xMPU_SETTINGS * pxMpuSettings;
+        uint32_t * pulTaskStack;
+        uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_functions_start__;
+            extern uint32_t * __privileged_functions_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __privileged_functions_start__[];
+            extern uint32_t __privileged_functions_end__[];
+        #endif /* #if defined( __ARMCC_VERSION ) */
+
+        ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+        /* Checks:
+         * 1. SVC is raised from the privileged code (i.e. application is not
+         *    raising SVC directly). This SVC is only raised from
+         *    vRequestSystemCallExit which is in the privileged code section.
+         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+         *    this means that we previously entered a system call and the
+         *    application is not attempting to exit without entering a system
+         *    call.
+         */
+        if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+            ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+        {
+            pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+            {
+                /* Bit[4] of EXC_RETURN clear means an extended (FP) frame
+                 * was stacked. */
+                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+                {
+                    /* Extended frame i.e. FPU in use. */
+                    ulStackFrameSize = 26;
+                    __asm volatile
+                    (
+                        " vpush {s0} \n" /* Trigger lazy stacking. */
+                        " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+                        ::: "memory"
+                    );
+                }
+                else
+                {
+                    /* Standard frame i.e. FPU not in use. */
+                    ulStackFrameSize = 8;
+                }
+            }
+            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+            {
+                ulStackFrameSize = 8;
+            }
+            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+            /* Make space on the task stack for the stack frame. */
+            pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+            /* Copy the stack frame. */
+            for( i = 0; i < ulStackFrameSize; i++ )
+            {
+                pulTaskStack[ i ] = pulSystemCallStack[ i ];
+            }
+
+            /* Use the pulTaskStack in thread mode. */
+            __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+            /* Return to the caller of the System Call entry point (i.e. the
+             * caller of the MPU_<API>). */
+            pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+            /* Ensure that LR has a valid value.*/
+            pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+            /* Restore the PSPLIM register to what it was at the time of
+             * system call entry. */
+            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+            /* If the hardware used padding to force the stack pointer
+             * to be double word aligned, set the stacked xPSR bit[9],
+             * otherwise clear it. */
+            if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+            }
+            else
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+            }
+
+            /* This is not NULL only for the duration of the system call. */
+            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+            /* Drop the privilege before returning to the thread mode. */
+            __asm volatile
+            (
+                " mrs r0, control \n" /* Obtain current control value. */
+                " movs r1, #1 \n" /* r1 = 1. */
+                " orrs r0, r1 \n" /* Set nPRIV bit. */
+                " msr control, r0 \n" /* Write back new control value. */
+                ::: "r0", "r1", "memory"
+            );
+        }
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Return pdTRUE when the currently running task was created as a
+     * privileged task, pdFALSE otherwise. */
+    BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Passing NULL requests the MPU settings of the calling task. */
+        const xMPU_SETTINGS * pxSettings = xTaskGetMPUSettings( NULL );
+
+        return ( ( pxSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) ? pdTRUE : pdFALSE;
+    }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Build the initial context for a new task.  With the MPU enabled the
+     * context lives in the task's xMPU_SETTINGS (not on the task stack);
+     * the slot order here must match what the assembly context-switch code
+     * expects.  Returns a pointer one past the last context entry used. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters,
+                                         BaseType_t xRunPrivileged,
+                                         xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulIndex = 0;
+
+        xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+        ulIndex++;
+
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+        ulIndex++;
+
+        #if ( configENABLE_TRUSTZONE == 1 )
+        {
+            xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+            ulIndex++;
+        }
+        #endif /* configENABLE_TRUSTZONE */
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+        ulIndex++;
+        if( xRunPrivileged == pdTRUE )
+        {
+            xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        else
+        {
+            xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+        ulIndex++;
+
+        #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+        {
+            /* Ensure that the system call stack is double word aligned. */
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+                                                                                     ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+                                                                                            ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+                                                                                          ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            /* This is not NULL only for the duration of a system call. */
+            xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+        }
+        #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+        return &( xMPUSettings->ulContext[ ulIndex ] );
+    }
+
+#else /* configENABLE_MPU */
+
+    /* Build the initial stack frame for a new task on the task's own stack
+     * and return the resulting top-of-stack pointer. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Simulate the stack frame as it would be created by a context switch
+         * interrupt. */
+        #if ( portPRELOAD_REGISTERS == 0 )
+        {
+            pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
+            *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+            pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
+            *pxTopOfStack = portINITIAL_EXC_RETURN;
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #else /* portPRELOAD_REGISTERS */
+        {
+            pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+            pxTopOfStack--;
+            *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #endif /* portPRELOAD_REGISTERS */
+
+        return pxTopOfStack;
+    }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Configure kernel interrupt priorities, the MPU and the tick timer, then
+ * start the first task.  Does not return on success; returns 0 only if the
+ * scheduler could not be started. */
+BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+    {
+        volatile uint32_t ulOriginalPriority;
+        volatile uint32_t ulImplementedPrioBits = 0;
+        volatile uint8_t ucMaxPriorityValue;
+
+        /* Determine the maximum priority from which ISR safe FreeRTOS API
+         * functions can be called. ISR safe functions are those that end in
+         * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+         * ensure interrupt entry is as fast and simple as possible.
+         *
+         * Save the interrupt priority value that is about to be clobbered. */
+        ulOriginalPriority = portNVIC_SHPR2_REG;
+
+        /* Determine the number of priority bits available. First write to all
+         * possible bits. */
+        portNVIC_SHPR2_REG = 0xFF000000;
+
+        /* Read the value back to see how many bits stuck. */
+        ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+        /* Use the same mask on the maximum system call priority. */
+        ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+        /* Check that the maximum system call priority is nonzero after
+         * accounting for the number of priority bits supported by the
+         * hardware. A priority of 0 is invalid because setting the BASEPRI
+         * register to 0 unmasks all interrupts, and interrupts with priority 0
+         * cannot be masked using BASEPRI.
+         * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+        configASSERT( ucMaxSysCallPriority );
+
+        /* Check that the bits not implemented in hardware are zero in
+         * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+        configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+        /* Calculate the maximum acceptable priority group value for the number
+         * of bits read back. */
+
+        while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+        {
+            ulImplementedPrioBits++;
+            ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+        }
+
+        if( ulImplementedPrioBits == 8 )
+        {
+            /* When the hardware implements 8 priority bits, there is no way for
+             * the software to configure PRIGROUP to not have sub-priorities. As
+             * a result, the least significant bit is always used for sub-priority
+             * and there are 128 preemption priorities and 2 sub-priorities.
+             *
+             * This may cause some confusion in some cases - for example, if
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+             * priority interrupts will be masked in Critical Sections as those
+             * are at the same preemption priority. This may appear confusing as
+             * 4 is higher (numerically lower) priority than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+             * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+             * to 4, this confusion does not happen and the behaviour remains the same.
+             *
+             * The following assert ensures that the sub-priority bit in the
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+             * confusion. */
+            configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+            ulMaxPRIGROUPValue = 0;
+        }
+        else
+        {
+            ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+        }
+
+        /* Shift the priority group value back to its position within the AIRCR
+         * register. */
+        ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+        ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+        /* Restore the clobbered interrupt priority register to its original
+         * value. */
+        portNVIC_SHPR2_REG = ulOriginalPriority;
+    }
+    #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+    /* Make PendSV, CallSV and SysTick the same priority as the kernel. */
+    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
+    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
+
+    #if ( configENABLE_MPU == 1 )
+    {
+        /* Setup the Memory Protection Unit (MPU). */
+        prvSetupMPU();
+    }
+    #endif /* configENABLE_MPU */
+
+    /* Start the timer that generates the tick ISR. Interrupts are disabled
+     * here already. */
+    vPortSetupTimerInterrupt();
+
+    /* Initialize the critical nesting count ready for the first task. */
+    ulCriticalNesting = 0;
+
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+    {
+        xSchedulerRunning = pdTRUE;
+    }
+    #endif
+
+    /* Start the first task. */
+    vStartFirstTask();
+
+    /* Should never get here as the tasks will now be executing. Call the task
+     * exit error function to prevent compiler warnings about a static function
+     * not being called in the case that the application writer overrides this
+     * functionality by defining configTASK_RETURN_ADDRESS. Call
+     * vTaskSwitchContext() so link time optimization does not remove the
+     * symbol. */
+    vTaskSwitchContext();
+    prvTaskExitError();
+
+    /* Should not get here. */
+    return 0;
+}
+/*-----------------------------------------------------------*/
+
+/* Ending the scheduler is not supported on this port; deliberately trip a
+ * configASSERT so misuse is caught during development. */
+void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Not implemented in ports where there is nothing to return to.
+     * Artificially force an assert - ulCriticalNesting is never expected
+     * to reach 1000, so this condition always fails. */
+    configASSERT( ulCriticalNesting == 1000UL );
+}
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
    /* Translate a task's memory access requirements into ARMv8-M MPU register
     * values (MAIR0 plus one RBAR/RLAR pair per region) stored in
     * xMPUSettings:
     *  - Region 0 covers the task stack, unless the stack lies wholly inside
     *    the privileged SRAM (already covered by a static region - ARMv8-M
     *    forbids overlapping regions, so it is then left invalidated).
     *  - Regions 1..portNUM_CONFIGURABLE_REGIONS hold the caller supplied
     *    xRegions entries; unused entries are invalidated (RBAR/RLAR = 0).
     * ulStackDepth is non-zero only when called during task creation, in
     * which case pxBottomOfStack/ulStackDepth describe a valid stack. */
    void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
                                    const struct xMEMORY_REGION * const xRegions,
                                    StackType_t * pxBottomOfStack,
                                    uint32_t ulStackDepth )
    {
        uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber;
        int32_t lIndex = 0;

        #if defined( __ARMCC_VERSION )
            /* Declaration when these variables are defined in code instead of being
             * exported from linker scripts. */
            extern uint32_t * __privileged_sram_start__;
            extern uint32_t * __privileged_sram_end__;
        #else
            /* Declaration when these variables are exported from linker scripts. */
            extern uint32_t __privileged_sram_start__[];
            extern uint32_t __privileged_sram_end__[];
        #endif /* defined( __ARMCC_VERSION ) */

        /* Setup MAIR0 - attribute index 0 is normal (bufferable, cacheable)
         * memory, attribute index 1 is device memory; each region selects one
         * of the two through its RLAR attribute-index field below. */
        xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
        xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );

        /* This function is called automatically when the task is created - in
         * which case the stack region parameters will be valid. At all other
         * times the stack parameters will not be valid and it is assumed that
         * the stack region has already been configured. */
        if( ulStackDepth > 0 )
        {
            ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
            ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1;

            /* If the stack is within the privileged SRAM, do not protect it
             * using a separate MPU region. This is needed because privileged
             * SRAM is already protected using an MPU region and ARMv8-M does
             * not allow overlapping MPU regions. */
            if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) &&
                ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) )
            {
                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0;
                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0;
            }
            else
            {
                /* Define the region that allows access to the stack. */
                ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK;
                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;

                /* Stack is read/write but never executable. */
                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) |
                                                             ( portMPU_REGION_NON_SHAREABLE ) |
                                                             ( portMPU_REGION_READ_WRITE ) |
                                                             ( portMPU_REGION_EXECUTE_NEVER );

                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) |
                                                             ( portMPU_RLAR_ATTR_INDEX0 ) |
                                                             ( portMPU_RLAR_REGION_ENABLE );
            }
        }

        /* User supplied configurable regions. */
        for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ )
        {
            /* If xRegions is NULL i.e. the task has not specified any MPU
             * region, the else part ensures that all the configurable MPU
             * regions are invalidated. */
            if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) )
            {
                /* Translate the generic region definition contained in xRegions
                 * into the ARMv8 specific MPU settings that are then stored in
                 * xMPUSettings. */
                ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK;
                ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1;
                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;

                /* Start address. */
                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) |
                                                                          ( portMPU_REGION_NON_SHAREABLE );

                /* RO/RW. */
                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
                {
                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY );
                }
                else
                {
                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE );
                }

                /* XN. */
                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
                {
                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER );
                }

                /* End Address. */
                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) |
                                                                          ( portMPU_RLAR_REGION_ENABLE );

                /* Normal memory/ Device memory. */
                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
                {
                    /* Attr1 in MAIR0 is configured as device memory. */
                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1;
                }
                else
                {
                    /* Attr0 in MAIR0 is configured as normal memory. */
                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0;
                }
            }
            else
            {
                /* Invalidate the region. */
                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL;
                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL;
            }

            lIndex++;
        }
    }
#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
    /* Return pdTRUE if the calling task may perform the access described by
     * ulAccessRequested on the ulBufferLength bytes starting at pvBuffer.
     * A privileged task (portTASK_IS_PRIVILEGED_FLAG set) is always granted
     * access.  An unprivileged task is granted access only if the entire
     * buffer lies within a single enabled MPU region of the task whose
     * permissions authorize the requested access.
     * NOTE(review): a zero ulBufferLength makes ( ulBufferLength - 1UL ) wrap
     * to 0xFFFFFFFF, so the overflow pre-check rejects the request - confirm
     * callers never pass a zero length. */
    BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
                                                uint32_t ulBufferLength,
                                                uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */

    {
        uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
        BaseType_t xAccessGranted = pdFALSE;
        const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */

        if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
        {
            xAccessGranted = pdTRUE;
        }
        else
        {
            /* Guard against [start, start + length - 1] wrapping the 32-bit
             * address space before computing the inclusive end address. */
            if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
            {
                ulBufferStartAddress = ( uint32_t ) pvBuffer;
                ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );

                for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
                {
                    /* Is the MPU region enabled? */
                    if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
                    {
                        /* Both ends of the buffer must fall inside this one
                         * region and the region's permissions must allow the
                         * requested access. */
                        if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
                            portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
                            portIS_AUTHORIZED( ulAccessRequested,
                                               prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
                        {
                            xAccessGranted = pdTRUE;
                            break;
                        }
                    }
                }
            }
        }

        return xAccessGranted;
    }
#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortIsInsideInterrupt( void )
+{
+ uint32_t ulCurrentInterrupt;
+ BaseType_t xReturn;
+
+ /* Obtain the number of the currently executing interrupt. Interrupt Program
+ * Status Register (IPSR) holds the exception number of the currently-executing
+ * exception or zero for Thread mode.*/
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+ if( ulCurrentInterrupt == 0 )
+ {
+ xReturn = pdFALSE;
+ }
+ else
+ {
+ xReturn = pdTRUE;
+ }
+
+ return xReturn;
+}
+/*-----------------------------------------------------------*/
+
#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )

    /* Assert that the currently executing user interrupt is allowed to call
     * ISR-safe FreeRTOS API functions: its priority (read from the
     * pcInterruptPriorityRegisters alias) must be numerically at or above
     * configMAX_SYSCALL_INTERRUPT_PRIORITY, and the NVIC priority grouping
     * must not allocate any sub-priority bits. */
    void vPortValidateInterruptPriority( void )
    {
        uint32_t ulCurrentInterrupt;
        uint8_t ucCurrentPriority;

        /* Obtain the number of the currently executing interrupt. */
        __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );

        /* Is the interrupt number a user defined interrupt? */
        if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
        {
            /* Look up the interrupt's priority. */
            ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];

            /* The following assertion will fail if a service routine (ISR) for
             * an interrupt that has been assigned a priority above
             * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
             * function. ISR safe FreeRTOS API functions must *only* be called
             * from interrupts that have been assigned a priority at or below
             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
             *
             * Numerically low interrupt priority numbers represent logically high
             * interrupt priorities, therefore the priority of the interrupt must
             * be set to a value equal to or numerically *higher* than
             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
             *
             * Interrupts that use the FreeRTOS API must not be left at their
             * default priority of zero as that is the highest possible priority,
             * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
             * and therefore also guaranteed to be invalid.
             *
             * FreeRTOS maintains separate thread and ISR API functions to ensure
             * interrupt entry is as fast and simple as possible.
             *
             * The following links provide detailed information:
             * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
             * https://www.FreeRTOS.org/FAQHelp.html */
            configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
        }

        /* Priority grouping: The interrupt controller (NVIC) allows the bits
         * that define each interrupt's priority to be split between bits that
         * define the interrupt's pre-emption priority bits and bits that define
         * the interrupt's sub-priority. For simplicity all bits must be defined
         * to be pre-emption priority bits. The following assertion will fail if
         * this is not the case (if some bits represent a sub-priority).
         *
         * If the application only uses CMSIS libraries for interrupt
         * configuration then the correct setting can be achieved on all Cortex-M
         * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
         * scheduler. Note however that some vendor specific peripheral libraries
         * assume a non-zero priority group setting, in which cases using a value
         * of zero will result in unpredictable behaviour. */
        configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
    }

#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )

    /* Grant the given task access to the kernel object identified by
     * lInternalIndexOfKernelObject by setting the object's bit in the task's
     * access control list. */
    void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
                                         int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
    {
        xMPU_SETTINGS * pxTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
        const uint32_t ulAclEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject ) / portACL_ENTRY_SIZE_BITS;
        const uint32_t ulAclEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject ) % portACL_ENTRY_SIZE_BITS;

        pxTaskMpuSettings->ulAccessControlList[ ulAclEntryIndex ] |= ( 1U << ulAclEntryBit );
    }

#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )

    /* Revoke the given task's access to the kernel object identified by
     * lInternalIndexOfKernelObject by clearing the object's bit in the task's
     * access control list. */
    void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
                                          int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
    {
        xMPU_SETTINGS * pxTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
        const uint32_t ulAclEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject ) / portACL_ENTRY_SIZE_BITS;
        const uint32_t ulAclEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject ) % portACL_ENTRY_SIZE_BITS;

        pxTaskMpuSettings->ulAccessControlList[ ulAclEntryIndex ] &= ~( 1U << ulAclEntryBit );
    }

#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

    #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )

        /* Return pdTRUE if the calling task is allowed to use the kernel
         * object identified by lInternalIndexOfKernelObject.  Access is
         * granted unconditionally before the scheduler starts and for
         * privileged tasks; otherwise the object's bit in the calling task's
         * access control list decides. */
        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
        {
            BaseType_t xGranted;
            uint32_t ulAclEntryIndex, ulAclEntryBit;
            const xMPU_SETTINGS * pxTaskMpuSettings;

            if( xSchedulerRunning == pdFALSE )
            {
                /* Grant access to all the kernel objects before the scheduler
                 * is started. It is necessary because there is no task running
                 * yet and therefore, we cannot use the permissions of any
                 * task. */
                xGranted = pdTRUE;
            }
            else
            {
                pxTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */

                if( ( pxTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
                {
                    /* Privileged tasks may use every kernel object. */
                    xGranted = pdTRUE;
                }
                else
                {
                    /* Consult the bit for this object in the task's ACL. */
                    ulAclEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject ) / portACL_ENTRY_SIZE_BITS;
                    ulAclEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject ) % portACL_ENTRY_SIZE_BITS;

                    if( ( pxTaskMpuSettings->ulAccessControlList[ ulAclEntryIndex ] & ( 1U << ulAclEntryBit ) ) != 0 )
                    {
                        xGranted = pdTRUE;
                    }
                    else
                    {
                        xGranted = pdFALSE;
                    }
                }
            }

            return xGranted;
        }

    #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */

        /* Without the Access Control List feature every task may use every
         * kernel object, so always grant access. */
        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
        {
            ( void ) lInternalIndexOfKernelObject;

            return pdTRUE;
        }

    #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */

#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM85/non_secure/portasm.c b/Source/portable/GCC/ARM_CM85/non_secure/portasm.c
new file mode 100644
index 0000000..7431c98
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/non_secure/portasm.c
@@ -0,0 +1,608 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION
+ * is defined correctly and privileged functions are placed in correct sections. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Portasm includes. */
+#include "portasm.h"
+
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
+
+/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
+ * header files. */
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
#if ( configENABLE_MPU == 1 )

    /* Restore the context of the first task to run and branch to it.  The MPU
     * variant first programs the MPU from the TCB (MAIR0 followed by the
     * RBAR/RLAR pairs, written 4 regions at a time through the alias
     * registers), then restores the special registers, the general registers
     * and finally returns via the restored EXC_RETURN in LR with interrupts
     * enabled.  Naked function - no compiler generated prologue/epilogue. */
    void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
    {
        __asm volatile
        (
            " .syntax unified                                 \n"
            "                                                 \n"
            " program_mpu_first_task:                         \n"
            "    ldr r3, pxCurrentTCBConst2                   \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
            "    ldr r0, [r3]                                 \n" /* r0 = pxCurrentTCB. */
            "                                                 \n"
            "    dmb                                          \n" /* Complete outstanding transfers before disabling MPU. */
            "    ldr r1, xMPUCTRLConst2                       \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
            "    ldr r2, [r1]                                 \n" /* Read the value of MPU_CTRL. */
            "    bic r2, #1                                   \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
            "    str r2, [r1]                                 \n" /* Disable MPU. */
            "                                                 \n"
            "    adds r0, #4                                  \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
            "    ldr r1, [r0]                                 \n" /* r1 = *r0 i.e. r1 = MAIR0. */
            "    ldr r2, xMAIR0Const2                         \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
            "    str r1, [r2]                                 \n" /* Program MAIR0. */
            "                                                 \n"
            "    adds r0, #4                                  \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
            "    ldr r1, xRNRConst2                           \n" /* r1 = 0xe000ed98 [Location of RNR]. */
            "    ldr r2, xRBARConst2                          \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
            "                                                 \n"
            "    movs r3, #4                                  \n" /* r3 = 4. */
            "    str r3, [r1]                                 \n" /* Program RNR = 4. */
            "    ldmia r0!, {r4-r11}                          \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
            "    stmia r2, {r4-r11}                           \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
            "                                                 \n"
            #if ( configTOTAL_MPU_REGIONS == 16 )
                "    movs r3, #8                              \n" /* r3 = 8. */
                "    str r3, [r1]                             \n" /* Program RNR = 8. */
                "    ldmia r0!, {r4-r11}                      \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
                "    stmia r2, {r4-r11}                       \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
                "    movs r3, #12                             \n" /* r3 = 12. */
                "    str r3, [r1]                             \n" /* Program RNR = 12. */
                "    ldmia r0!, {r4-r11}                      \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
                "    stmia r2, {r4-r11}                       \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
            #endif /* configTOTAL_MPU_REGIONS == 16 */
            "                                                 \n"
            "    ldr r1, xMPUCTRLConst2                       \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
            "    ldr r2, [r1]                                 \n" /* Read the value of MPU_CTRL. */
            "    orr r2, #1                                   \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
            "    str r2, [r1]                                 \n" /* Enable MPU. */
            "    dsb                                          \n" /* Force memory writes before continuing. */
            "                                                 \n"
            " restore_context_first_task:                     \n"
            "    ldr r3, pxCurrentTCBConst2                   \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
            "    ldr r1, [r3]                                 \n" /* r1 = pxCurrentTCB.*/
            "    ldr r2, [r1]                                 \n" /* r2 = Location of saved context in TCB. */
            "                                                 \n"
            " restore_special_regs_first_task:                \n"
            "    ldmdb r2!, {r0, r3-r5, lr}                   \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
            "    msr psp, r3                                  \n"
            "    msr psplim, r4                               \n"
            "    msr control, r5                              \n"
            "    ldr r4, xSecureContextConst2                 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
            "    str r0, [r4]                                 \n" /* Restore xSecureContext. */
            "                                                 \n"
            " restore_general_regs_first_task:                \n"
            "    ldmdb r2!, {r4-r11}                          \n" /* r4-r11 contain hardware saved context. */
            "    stmia r3!, {r4-r11}                          \n" /* Copy the hardware saved context on the task stack. */
            "    ldmdb r2!, {r4-r11}                          \n" /* r4-r11 restored. */
            "                                                 \n"
            " restore_context_done_first_task:                \n"
            "    str r2, [r1]                                 \n" /* Save the location where the context should be saved next as the first member of TCB. */
            "    mov r0, #0                                   \n"
            "    msr basepri, r0                              \n" /* Ensure that interrupts are enabled when the first task starts. */
            "    bx lr                                        \n"
            "                                                 \n"
            " .align 4                                        \n"
            " pxCurrentTCBConst2: .word pxCurrentTCB          \n"
            " xSecureContextConst2: .word xSecureContext      \n"
            " xMPUCTRLConst2: .word 0xe000ed94                \n"
            " xMAIR0Const2: .word 0xe000edc0                  \n"
            " xRNRConst2: .word 0xe000ed98                    \n"
            " xRBARConst2: .word 0xe000ed9c                   \n"
        );
    }

#else /* configENABLE_MPU */

    /* Restore the context of the first task to run: set xSecureContext and
     * PSPLIM from the saved stack frame, switch Thread mode to use PSP, set
     * PSP past the hardware saved frame, enable interrupts and branch to the
     * saved EXC_RETURN.  Naked function - no compiler generated
     * prologue/epilogue. */
    void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
    {
        __asm volatile
        (
            " .syntax unified                                 \n"
            "                                                 \n"
            "    ldr r2, pxCurrentTCBConst2                   \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
            "    ldr r3, [r2]                                 \n" /* Read pxCurrentTCB. */
            "    ldr r0, [r3]                                 \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
            "                                                 \n"
            "    ldm r0!, {r1-r3}                             \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
            "    ldr r4, xSecureContextConst2                 \n"
            "    str r1, [r4]                                 \n" /* Set xSecureContext to this task's value for the same. */
            "    msr psplim, r2                               \n" /* Set this task's PSPLIM value. */
            "    movs r1, #2                                  \n" /* r1 = 2. */
            "    msr CONTROL, r1                              \n" /* Switch to use PSP in the thread mode. */
            "    adds r0, #32                                 \n" /* Discard everything up to r0. */
            "    msr psp, r0                                  \n" /* This is now the new top of stack to use in the task. */
            "    isb                                          \n"
            "    mov r0, #0                                   \n"
            "    msr basepri, r0                              \n" /* Ensure that interrupts are enabled when the first task starts. */
            "    bx r3                                        \n" /* Finally, branch to EXC_RETURN. */
            " .align 4                                        \n"
            "pxCurrentTCBConst2: .word pxCurrentTCB           \n"
            "xSecureContextConst2: .word xSecureContext       \n"
        );
    }

#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
/* Return pdTRUE (1) when the processor is running privileged (CONTROL bit 0
 * clear), pdFALSE (0) otherwise.  Naked function - the result is placed in r0,
 * the AAPCS return register. */
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
{
    __asm volatile
    (
        " .syntax unified                                 \n"
        "                                                 \n"
        " mrs r0, control                                 \n" /* r0 = CONTROL. */
        " tst r0, #1                                      \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
        " ite ne                                          \n"
        " movne r0, #0                                    \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
        " moveq r0, #1                                    \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
        " bx lr                                           \n" /* Return. */
        "                                                 \n"
        " .align 4                                        \n"
        ::: "r0", "memory"
    );
}
+/*-----------------------------------------------------------*/
+
/* Raise the processor to privileged mode by clearing the nPRIV bit (bit 0) of
 * the CONTROL register.  Naked function; only callable while already
 * privileged (writing CONTROL is a privileged operation). */
void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
    __asm volatile
    (
        " .syntax unified                                 \n"
        "                                                 \n"
        " mrs r0, control                                 \n" /* Read the CONTROL register. */
        " bic r0, #1                                      \n" /* Clear the bit 0. */
        " msr control, r0                                 \n" /* Write back the new CONTROL value. */
        " bx lr                                           \n" /* Return to the caller. */
        ::: "r0", "memory"
    );
}
+/*-----------------------------------------------------------*/
+
/* Drop the processor to unprivileged mode by setting the nPRIV bit (bit 0) of
 * the CONTROL register.  Naked function. */
void vResetPrivilege( void ) /* __attribute__ (( naked )) */
{
    __asm volatile
    (
        " .syntax unified                                 \n"
        "                                                 \n"
        " mrs r0, control                                 \n" /* r0 = CONTROL. */
        " orr r0, #1                                      \n" /* r0 = r0 | 1. */
        " msr control, r0                                 \n" /* CONTROL = r0. */
        " bx lr                                           \n" /* Return to the caller. */
        ::: "r0", "memory"
    );
}
+/*-----------------------------------------------------------*/
+
/* Start the scheduler's first task: reset MSP to the initial stack pointer
 * taken from the vector table (via VTOR), enable interrupts and raise the
 * portSVC_START_SCHEDULER supervisor call, whose handler launches the first
 * task.  Naked function - never returns. */
void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
    __asm volatile
    (
        " .syntax unified                                 \n"
        "                                                 \n"
        " ldr r0, xVTORConst                              \n" /* Use the NVIC offset register to locate the stack. */
        " ldr r0, [r0]                                    \n" /* Read the VTOR register which gives the address of vector table. */
        " ldr r0, [r0]                                    \n" /* The first entry in vector table is stack pointer. */
        " msr msp, r0                                     \n" /* Set the MSP back to the start of the stack. */
        " cpsie i                                         \n" /* Globally enable interrupts. */
        " cpsie f                                         \n"
        " dsb                                             \n"
        " isb                                             \n"
        " svc %0                                          \n" /* System call to start the first task. */
        " nop                                             \n"
        "                                                 \n"
        " .align 4                                        \n"
        "xVTORConst: .word 0xe000ed08                     \n"
        ::"i" ( portSVC_START_SCHEDULER ) : "memory"
    );
}
+/*-----------------------------------------------------------*/
+
/* Mask interrupts at and below configMAX_SYSCALL_INTERRUPT_PRIORITY by
 * raising BASEPRI, and return the previous BASEPRI value so it can later be
 * passed to vClearInterruptMask().  Naked function - the original BASEPRI is
 * left in r0, the AAPCS return register, so no C return statement is needed. */
uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
{
    __asm volatile
    (
        " .syntax unified                                 \n"
        "                                                 \n"
        " mrs r0, basepri                                 \n" /* r0 = basepri. Return original basepri value. */
        " mov r1, %0                                      \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
        " msr basepri, r1                                 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
        " dsb                                             \n"
        " isb                                             \n"
        " bx lr                                           \n" /* Return. */
        ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
    );
}
+/*-----------------------------------------------------------*/
+
/* Restore BASEPRI to ulMask, typically the value previously returned by
 * ulSetInterruptMask(), re-enabling the interrupts that were masked.  Naked
 * function - ulMask arrives in r0 per AAPCS, hence the parameter appears
 * unused at the C level. */
void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
{
    __asm volatile
    (
        " .syntax unified                                 \n"
        "                                                 \n"
        " msr basepri, r0                                 \n" /* basepri = ulMask. */
        " dsb                                             \n"
        " isb                                             \n"
        " bx lr                                           \n" /* Return. */
        ::: "memory"
    );
}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
    /* PendSV exception handler - performs the context switch (MPU variant):
     * save the outgoing task's secure context (if any), FP and general
     * registers and special registers into its TCB; call vTaskSwitchContext()
     * with interrupts masked to pick the next task; reprogram the MPU from the
     * new task's TCB; then restore the new task's context and return through
     * its EXC_RETURN.  Naked function - no compiler generated
     * prologue/epilogue. */
    void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
    {
        __asm volatile
        (
            " .syntax unified                                 \n"
            " .extern SecureContext_SaveContext               \n"
            " .extern SecureContext_LoadContext               \n"
            "                                                 \n"
            " ldr r3, xSecureContextConst                     \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
            " ldr r0, [r3]                                    \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
            " ldr r3, pxCurrentTCBConst                       \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
            " ldr r1, [r3]                                    \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
            " ldr r2, [r1]                                    \n" /* r2 = Location in TCB where the context should be saved. */
            "                                                 \n"
            " cbz r0, save_ns_context                         \n" /* No secure context to save. */
            " save_s_context:                                 \n"
            "    push {r0-r2, lr}                             \n"
            "    bl SecureContext_SaveContext                 \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
            "    pop {r0-r2, lr}                              \n"
            "                                                 \n"
            " save_ns_context:                                \n"
            "    mov r3, lr                                   \n" /* r3 = LR (EXC_RETURN). */
            "    lsls r3, r3, #25                             \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
            "    bmi save_special_regs                        \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
            "                                                 \n"
            " save_general_regs:                              \n"
            "    mrs r3, psp                                  \n"
            "                                                 \n"
            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
                "    add r3, r3, #0x20                        \n" /* Move r3 to location where s0 is saved. */
                "    tst lr, #0x10                            \n"
                "    ittt eq                                  \n"
                "    vstmiaeq r2!, {s16-s31}                  \n" /* Store s16-s31. */
                "    vldmiaeq r3, {s0-s16}                    \n" /* Copy hardware saved FP context into s0-s16. */
                "    vstmiaeq r2!, {s0-s16}                   \n" /* Store hardware saved FP context. */
                "    sub r3, r3, #0x20                        \n" /* Set r3 back to the location of hardware saved context. */
            #endif /* configENABLE_FPU || configENABLE_MVE */
            "                                                 \n"
            "    stmia r2!, {r4-r11}                          \n" /* Store r4-r11. */
            "    ldmia r3, {r4-r11}                           \n" /* Copy the hardware saved context into r4-r11. */
            "    stmia r2!, {r4-r11}                          \n" /* Store the hardware saved context. */
            "                                                 \n"
            " save_special_regs:                              \n"
            "    mrs r3, psp                                  \n" /* r3 = PSP. */
            "    mrs r4, psplim                               \n" /* r4 = PSPLIM. */
            "    mrs r5, control                              \n" /* r5 = CONTROL. */
            "    stmia r2!, {r0, r3-r5, lr}                   \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
            "    str r2, [r1]                                 \n" /* Save the location from where the context should be restored as the first member of TCB. */
            "                                                 \n"
            " select_next_task:                               \n"
            "    mov r0, %0                                   \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
            "    msr basepri, r0                              \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
            "    dsb                                          \n"
            "    isb                                          \n"
            "    bl vTaskSwitchContext                        \n"
            "    mov r0, #0                                   \n" /* r0 = 0. */
            "    msr basepri, r0                              \n" /* Enable interrupts. */
            "                                                 \n"
            " program_mpu:                                    \n"
            "    ldr r3, pxCurrentTCBConst                    \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
            "    ldr r0, [r3]                                 \n" /* r0 = pxCurrentTCB.*/
            "                                                 \n"
            "    dmb                                          \n" /* Complete outstanding transfers before disabling MPU. */
            "    ldr r1, xMPUCTRLConst                        \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
            "    ldr r2, [r1]                                 \n" /* Read the value of MPU_CTRL. */
            "    bic r2, #1                                   \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
            "    str r2, [r1]                                 \n" /* Disable MPU. */
            "                                                 \n"
            "    adds r0, #4                                  \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
            "    ldr r1, [r0]                                 \n" /* r1 = *r0 i.e. r1 = MAIR0. */
            "    ldr r2, xMAIR0Const                          \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
            "    str r1, [r2]                                 \n" /* Program MAIR0. */
            "                                                 \n"
            "    adds r0, #4                                  \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
            "    ldr r1, xRNRConst                            \n" /* r1 = 0xe000ed98 [Location of RNR]. */
            "    ldr r2, xRBARConst                           \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
            "                                                 \n"
            "    movs r3, #4                                  \n" /* r3 = 4. */
            "    str r3, [r1]                                 \n" /* Program RNR = 4. */
            "    ldmia r0!, {r4-r11}                          \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
            "    stmia r2, {r4-r11}                           \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
            "                                                 \n"
            #if ( configTOTAL_MPU_REGIONS == 16 )
                "    movs r3, #8                              \n" /* r3 = 8. */
                "    str r3, [r1]                             \n" /* Program RNR = 8. */
                "    ldmia r0!, {r4-r11}                      \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
                "    stmia r2, {r4-r11}                       \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
                "    movs r3, #12                             \n" /* r3 = 12. */
                "    str r3, [r1]                             \n" /* Program RNR = 12. */
                "    ldmia r0!, {r4-r11}                      \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
                "    stmia r2, {r4-r11}                       \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
            #endif /* configTOTAL_MPU_REGIONS == 16 */
            "                                                 \n"
            "    ldr r1, xMPUCTRLConst                        \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
            "    ldr r2, [r1]                                 \n" /* Read the value of MPU_CTRL. */
            "    orr r2, #1                                   \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
            "    str r2, [r1]                                 \n" /* Enable MPU. */
            "    dsb                                          \n" /* Force memory writes before continuing. */
            "                                                 \n"
            " restore_context:                                \n"
            "    ldr r3, pxCurrentTCBConst                    \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
            "    ldr r1, [r3]                                 \n" /* r1 = pxCurrentTCB.*/
            "    ldr r2, [r1]                                 \n" /* r2 = Location of saved context in TCB. */
            "                                                 \n"
            " restore_special_regs:                           \n"
            "    ldmdb r2!, {r0, r3-r5, lr}                   \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
            "    msr psp, r3                                  \n"
            "    msr psplim, r4                               \n"
            "    msr control, r5                              \n"
            "    ldr r4, xSecureContextConst                  \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
            "    str r0, [r4]                                 \n" /* Restore xSecureContext. */
            "    cbz r0, restore_ns_context                   \n" /* No secure context to restore. */
            "                                                 \n"
            " restore_s_context:                              \n"
            "    push {r1-r3, lr}                             \n"
            "    bl SecureContext_LoadContext                 \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
            "    pop {r1-r3, lr}                              \n"
            "                                                 \n"
            " restore_ns_context:                             \n"
            "    mov r0, lr                                   \n" /* r0 = LR (EXC_RETURN). */
            "    lsls r0, r0, #25                             \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
            "    bmi restore_context_done                     \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
            "                                                 \n"
            " restore_general_regs:                           \n"
            "    ldmdb r2!, {r4-r11}                          \n" /* r4-r11 contain hardware saved context. */
            "    stmia r3!, {r4-r11}                          \n" /* Copy the hardware saved context on the task stack. */
            "    ldmdb r2!, {r4-r11}                          \n" /* r4-r11 restored. */
            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
                "    tst lr, #0x10                            \n"
                "    ittt eq                                  \n"
                "    vldmdbeq r2!, {s0-s16}                   \n" /* s0-s16 contain hardware saved FP context. */
                "    vstmiaeq r3!, {s0-s16}                   \n" /* Copy hardware saved FP context on the task stack. */
                "    vldmdbeq r2!, {s16-s31}                  \n" /* Restore s16-s31. */
            #endif /* configENABLE_FPU || configENABLE_MVE */
            "                                                 \n"
            " restore_context_done:                           \n"
            "    str r2, [r1]                                 \n" /* Save the location where the context should be saved next as the first member of TCB. */
            "    bx lr                                        \n"
            "                                                 \n"
            " .align 4                                        \n"
            " pxCurrentTCBConst: .word pxCurrentTCB           \n"
            " xSecureContextConst: .word xSecureContext       \n"
            " xMPUCTRLConst: .word 0xe000ed94                 \n"
            " xMAIR0Const: .word 0xe000edc0                   \n"
            " xRNRConst: .word 0xe000ed98                     \n"
            " xRBARConst: .word 0xe000ed9c                    \n"
            ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
        );
    }
+
+#else /* configENABLE_MPU */
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ " mrs r2, psp \n" /* Read PSP in r2. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " push {r0-r2, r14} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r3} \n" /* LR is now in r3. */
+ " mov lr, r3 \n" /* LR = r3. */
+ " lsls r1, r3, #25 \n" /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl save_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB.*/
+ " subs r2, r2, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ " str r2, [r1] \n" /* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n" /* r1 = PSPLIM. */
+ " mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
+ " b select_next_task \n"
+ " \n"
+ " save_ns_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " it eq \n"
+ " vstmdbeq r2!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " subs r2, r2, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ " str r2, [r1] \n" /* Save the new top of stack in TCB. */
+ " adds r2, r2, #12 \n" /* r2 = r2 + 12. */
+ " stm r2, {r4-r11} \n" /* Store the registers that are not saved automatically. */
+ " mrs r1, psplim \n" /* r1 = PSPLIM. */
+ " mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
+ " subs r2, r2, #12 \n" /* r2 = r2 - 12. */
+ " stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+        " msr basepri, r0                            \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ " ldr r2, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
+ " \n"
+ " ldmia r2!, {r0, r1, r4} \n" /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ " msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
+ " mov lr, r4 \n" /* LR = r4. */
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r3] \n" /* Restore the task's xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB. */
+ " push {r2, r4} \n"
+ " bl SecureContext_LoadContext \n" /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r2, r4} \n"
+ " mov lr, r4 \n" /* LR = r4. */
+ " lsls r1, r4, #25 \n" /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " msr psp, r2 \n" /* Remember the new top of stack for the task. */
+ " bx lr \n"
+ " \n"
+ " restore_ns_context: \n"
+ " ldmia r2!, {r4-r11} \n" /* Restore the registers that are not automatically restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " it eq \n"
+ " vldmiaeq r2!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " msr psp, r2 \n" /* Remember the new top of stack for the task. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ "xSecureContextConst: .word xSecureContext \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+ }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "blt syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ : "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+ }
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " tst lr, #4 \n"
+ " ite eq \n"
+ " mrseq r0, msp \n"
+ " mrsne r0, psp \n"
+ " ldr r1, svchandler_address_const \n"
+ " bx r1 \n"
+ " \n"
+ " .align 4 \n"
+ "svchandler_address_const: .word vPortSVCHandler_C \n"
+ );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " svc %0 \n" /* Secure context is allocated in the supervisor call. */
+ " bx lr \n" /* Return. */
+ ::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, [r0] \n" /* The first item in the TCB is the top of the stack. */
+ " ldr r1, [r2] \n" /* The first item on the stack is the task's xSecureContext. */
+ " cmp r1, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
+ " it ne \n"
+ " svcne %0 \n" /* Secure context is freed in the supervisor call. */
+ " bx lr \n" /* Return. */
+ ::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM85/non_secure/portasm.h b/Source/portable/GCC/ARM_CM85/non_secure/portasm.h
new file mode 100644
index 0000000..f64ceb5
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/non_secure/portasm.h
@@ -0,0 +1,114 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __PORT_ASM_H__
+#define __PORT_ASM_H__
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+
+/* MPU wrappers includes. */
+#include "mpu_wrappers.h"
+
+/**
+ * @brief Restore the context of the first task so that the first task starts
+ * executing.
+ */
+void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Raises the privilege level by clearing the bit 0 of the CONTROL
+ * register.
+ *
+ * @note This is a privileged function and should only be called from the kernel
+ * code.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vResetPrivilege( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Starts the first task.
+ */
+void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Disables interrupts.
+ */
+uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enables interrupts.
+ */
+void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief PendSV Exception handler.
+ */
+void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SVC Handler.
+ */
+void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Allocate a Secure context for the calling task.
+ *
+ * @param[in] ulSecureStackSize The size of the stack to be allocated on the
+ * secure side for the calling task.
+ */
+void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) );
+
+/**
+ * @brief Free the task's secure context.
+ *
+ * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task.
+ */
+void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* __PORT_ASM_H__ */
diff --git a/Source/portable/GCC/ARM_CM85/non_secure/portmacro.h b/Source/portable/GCC/ARM_CM85/non_secure/portmacro.h
new file mode 100644
index 0000000..f606f81
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/non_secure/portmacro.h
@@ -0,0 +1,78 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_MVE
+ #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE.
+#endif /* configENABLE_MVE */
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portARCH_NAME "Cortex-M85"
+#define portHAS_BASEPRI 1
+#define portDONT_DISCARD __attribute__( ( used ) )
+/*-----------------------------------------------------------*/
+
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portDISABLE_INTERRUPTS() ulSetInterruptMask()
+#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 )
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h b/Source/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h
new file mode 100644
index 0000000..6f666da
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h
@@ -0,0 +1,449 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
+
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
+
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
+
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Type definitions.
+ */
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
+
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
+
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+
+/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
+ * not need to be guarded with a critical section. */
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Extern declarations.
+ */
+extern BaseType_t xPortIsInsideInterrupt( void );
+
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
+
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief MPU specific constants.
+ */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
+
+/* MPU settings that can be overridden in FreeRTOSConfig.h. */
+#ifndef configTOTAL_MPU_REGIONS
+ /* Define to 8 for backward compatibility. */
+ #define configTOTAL_MPU_REGIONS ( 8UL )
+#endif
+
+/* MPU regions. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+
+/* Device memory attributes used in MPU_MAIR registers.
+ *
+ * 8-bit values encoded as follows:
+ * Bit[7:4] - 0000 - Device Memory
+ * Bit[3:2] - 00 --> Device-nGnRnE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
+ * Bit[1:0] - 00, Reserved.
+ */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+
+/* Normal memory attributes used in MPU_MAIR registers. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+
+/* Attributes used in MPU_RBAR registers. */
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
+ * @brief SVC numbers.
+ */
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Scheduler utilities.
+ */
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Tickless idle/low power functionality.
+ */
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Task function macros as described on the FreeRTOS.org WEB site.
+ */
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Allocate a secure context for the task.
+ *
+ * Tasks are not created with a secure context. Any task that is going to call
+ * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
+ * secure context before it calls any secure function.
+ *
+ * @param[in] ulSecureStackSize The size of the secure stack to be allocated.
+ */
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+
+/**
+ * @brief Called when a task is deleted to delete the task's secure context,
+ * if it has one.
+ *
+ * @param[in] pxTCB The TCB of the task being deleted.
+ */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+ #define portIS_PRIVILEGED() xIsPrivileged()
+
+/**
+ * @brief Raise an SVC request to raise privilege.
+ *
+ * The SVC handler checks that the SVC was raised from a system call and only
+ * then it raises the privilege. If this is called from any other place,
+ * the privilege is not raised.
+ */
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Barriers.
+ */
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/GCC/ARM_CM85/secure/secure_context.c b/Source/portable/GCC/ARM_CM85/secure/secure_context.c
new file mode 100644
index 0000000..e37dd96
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/secure/secure_context.c
@@ -0,0 +1,351 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Secure context includes. */
+#include "secure_context.h"
+
+/* Secure heap includes. */
+#include "secure_heap.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief CONTROL value for privileged tasks.
+ *
+ * Bit[0] - 0 --> Thread mode is privileged.
+ * Bit[1] - 1 --> Thread mode uses PSP.
+ */
+#define securecontextCONTROL_VALUE_PRIVILEGED 0x02
+
+/**
+ * @brief CONTROL value for un-privileged tasks.
+ *
+ * Bit[0] - 1 --> Thread mode is un-privileged.
+ * Bit[1] - 1 --> Thread mode uses PSP.
+ */
+#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03
+
+/**
+ * @brief Size of stack seal values in bytes.
+ */
+#define securecontextSTACK_SEAL_SIZE 8
+
+/**
+ * @brief Stack seal value as recommended by ARM.
+ */
+#define securecontextSTACK_SEAL_VALUE 0xFEF5EDA5
+
+/**
+ * @brief Maximum number of secure contexts.
+ */
+#ifndef secureconfigMAX_SECURE_CONTEXTS
+ #define secureconfigMAX_SECURE_CONTEXTS 8UL
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Pre-allocated array of secure contexts.
+ */
+SecureContext_t xSecureContexts[ secureconfigMAX_SECURE_CONTEXTS ];
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Get a free secure context for a task from the secure context pool (xSecureContexts).
+ *
+ * This function ensures that only one secure context is allocated for a task.
+ *
+ * @param[in] pvTaskHandle The task handle for which the secure context is allocated.
+ *
+ * @return Index of a free secure context in the xSecureContexts array.
+ */
+static uint32_t ulGetSecureContext( void * pvTaskHandle );
+
+/**
+ * @brief Return the secure context to the secure context pool (xSecureContexts).
+ *
+ * @param[in] ulSecureContextIndex Index of the context in the xSecureContexts array.
+ */
+static void vReturnSecureContext( uint32_t ulSecureContextIndex );
+
+/* These are implemented in assembly. */
+extern void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext );
+extern void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext );
+/*-----------------------------------------------------------*/
+
+static uint32_t ulGetSecureContext( void * pvTaskHandle )
+{
+ /* Start with invalid index. */
+ uint32_t i, ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS;
+
+ for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ )
+ {
+ if( ( xSecureContexts[ i ].pucCurrentStackPointer == NULL ) &&
+ ( xSecureContexts[ i ].pucStackLimit == NULL ) &&
+ ( xSecureContexts[ i ].pucStackStart == NULL ) &&
+ ( xSecureContexts[ i ].pvTaskHandle == NULL ) &&
+ ( ulSecureContextIndex == secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = i;
+ }
+ else if( xSecureContexts[ i ].pvTaskHandle == pvTaskHandle )
+ {
+ /* A task can only have one secure context. Do not allocate a second
+ * context for the same task. */
+ ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS;
+ break;
+ }
+ }
+
+ return ulSecureContextIndex;
+}
+/*-----------------------------------------------------------*/
+
+static void vReturnSecureContext( uint32_t ulSecureContextIndex )
+{
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = NULL;
+ xSecureContexts[ ulSecureContextIndex ].pucStackLimit = NULL;
+ xSecureContexts[ ulSecureContextIndex ].pucStackStart = NULL;
+ xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = NULL;
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_Init( void )
+{
+ uint32_t ulIPSR, i;
+ static uint32_t ulSecureContextsInitialized = 0;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ( ulIPSR != 0 ) && ( ulSecureContextsInitialized == 0 ) )
+ {
+ /* Ensure to initialize secure contexts only once. */
+ ulSecureContextsInitialized = 1;
+
+ /* No stack for thread mode until a task's context is loaded. */
+ secureportSET_PSPLIM( securecontextNO_STACK );
+ secureportSET_PSP( securecontextNO_STACK );
+
+ /* Initialize all secure contexts. */
+ for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ )
+ {
+ xSecureContexts[ i ].pucCurrentStackPointer = NULL;
+ xSecureContexts[ i ].pucStackLimit = NULL;
+ xSecureContexts[ i ].pucStackStart = NULL;
+ xSecureContexts[ i ].pvTaskHandle = NULL;
+ }
+
+ #if ( configENABLE_MPU == 1 )
+ {
+ /* Configure thread mode to use PSP and to be unprivileged. */
+ secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED );
+ }
+ #else /* configENABLE_MPU */
+ {
+ /* Configure thread mode to use PSP and to be privileged. */
+ secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED );
+ }
+ #endif /* configENABLE_MPU */
+ }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+ secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ uint32_t ulIsTaskPrivileged,
+ void * pvTaskHandle )
+#else /* configENABLE_MPU */
+ secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ void * pvTaskHandle )
+#endif /* configENABLE_MPU */
+{
+ uint8_t * pucStackMemory = NULL;
+ uint8_t * pucStackLimit;
+ uint32_t ulIPSR, ulSecureContextIndex;
+ SecureContextHandle_t xSecureContextHandle = securecontextINVALID_CONTEXT_ID;
+
+ #if ( configENABLE_MPU == 1 )
+ uint32_t * pulCurrentStackPointer = NULL;
+ #endif /* configENABLE_MPU */
+
+ /* Read the Interrupt Program Status Register (IPSR) and Process Stack Limit
+ * Register (PSPLIM) value. */
+ secureportREAD_IPSR( ulIPSR );
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode.
+ * Also do nothing if a secure context is already loaded. PSPLIM is set to
+ * securecontextNO_STACK when no secure context is loaded. */
+ if( ( ulIPSR != 0 ) && ( pucStackLimit == securecontextNO_STACK ) )
+ {
+ /* Obtain a free secure context. */
+ ulSecureContextIndex = ulGetSecureContext( pvTaskHandle );
+
+ /* Were we able to get a free context? */
+ if( ulSecureContextIndex < secureconfigMAX_SECURE_CONTEXTS )
+ {
+ /* Allocate the stack space. */
+ pucStackMemory = pvPortMalloc( ulSecureStackSize + securecontextSTACK_SEAL_SIZE );
+
+ if( pucStackMemory != NULL )
+ {
+ /* Since stack grows down, the starting point will be the last
+ * location. Note that this location is next to the last
+ * allocated byte for stack (excluding the space for seal values)
+ * because the hardware decrements the stack pointer before
+ * writing i.e. if stack pointer is 0x2, a push operation will
+ * decrement the stack pointer to 0x1 and then write at 0x1. */
+ xSecureContexts[ ulSecureContextIndex ].pucStackStart = pucStackMemory + ulSecureStackSize;
+
+ /* Seal the created secure process stack. */
+ *( uint32_t * )( pucStackMemory + ulSecureStackSize ) = securecontextSTACK_SEAL_VALUE;
+ *( uint32_t * )( pucStackMemory + ulSecureStackSize + 4 ) = securecontextSTACK_SEAL_VALUE;
+
+ /* The stack cannot go beyond this location. This value is
+ * programmed in the PSPLIM register on context switch.*/
+ xSecureContexts[ ulSecureContextIndex ].pucStackLimit = pucStackMemory;
+
+ xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = pvTaskHandle;
+
+ #if ( configENABLE_MPU == 1 )
+ {
+ /* Store the correct CONTROL value for the task on the stack.
+ * This value is programmed in the CONTROL register on
+ * context switch. */
+ pulCurrentStackPointer = ( uint32_t * ) xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+ pulCurrentStackPointer--;
+
+ if( ulIsTaskPrivileged )
+ {
+ *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED;
+ }
+ else
+ {
+ *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED;
+ }
+
+ /* Store the current stack pointer. This value is programmed in
+ * the PSP register on context switch. */
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer;
+ }
+ #else /* configENABLE_MPU */
+ {
+ /* Current SP is set to the start of the stack. This
+ * value is programmed in the PSP register on context switch. */
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+ }
+ #endif /* configENABLE_MPU */
+
+ /* Ensure to never return 0 as a valid context handle. */
+ xSecureContextHandle = ulSecureContextIndex + 1UL;
+ }
+ }
+ }
+
+ return xSecureContextHandle;
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint32_t ulIPSR, ulSecureContextIndex;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ulIPSR != 0 )
+ {
+ /* Only free if a valid context handle is passed. */
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ /* Ensure that the secure context being deleted is associated with
+ * the task. */
+ if( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle )
+ {
+ /* Free the stack space. */
+ vPortFree( xSecureContexts[ ulSecureContextIndex ].pucStackLimit );
+
+ /* Return the secure context back to the free secure contexts pool. */
+ vReturnSecureContext( ulSecureContextIndex );
+ }
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint8_t * pucStackLimit;
+ uint32_t ulSecureContextIndex;
+
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Ensure that no secure context is loaded and the task is loading its
+ * own context. */
+ if( ( pucStackLimit == securecontextNO_STACK ) &&
+ ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) )
+ {
+ SecureContext_LoadContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) );
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint8_t * pucStackLimit;
+ uint32_t ulSecureContextIndex;
+
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Ensure that the task's context is loaded and the task is saving its own
+ * context. */
+ if( ( xSecureContexts[ ulSecureContextIndex ].pucStackLimit == pucStackLimit ) &&
+ ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) )
+ {
+ SecureContext_SaveContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) );
+ }
+ }
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM85/secure/secure_context.h b/Source/portable/GCC/ARM_CM85/secure/secure_context.h
new file mode 100644
index 0000000..2220ea6
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/secure/secure_context.h
@@ -0,0 +1,135 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_CONTEXT_H__
+#define __SECURE_CONTEXT_H__
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* FreeRTOS includes. */
+#include "FreeRTOSConfig.h"
+
+/**
+ * @brief PSP value when no secure context is loaded.
+ */
+#define securecontextNO_STACK 0x0
+
+/**
+ * @brief Invalid context ID.
+ */
+#define securecontextINVALID_CONTEXT_ID 0UL
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Structure to represent a secure context.
+ *
+ * @note Since stack grows down, pucStackStart is the highest address while
+ * pucStackLimit is the first address of the allocated memory.
+ */
+typedef struct SecureContext
+{
+ uint8_t * pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */
+ uint8_t * pucStackLimit; /**< Last location of the stack memory (PSPLIM). */
+ uint8_t * pucStackStart; /**< First location of the stack memory. */
+ void * pvTaskHandle; /**< Task handle of the task this context is associated with. */
+} SecureContext_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Opaque handle for a secure context.
+ */
+typedef uint32_t SecureContextHandle_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Initializes the secure context management system.
+ *
+ * PSP is set to NULL and therefore a task must allocate and load a context
+ * before calling any secure side function in the thread mode.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureContext_Init( void );
+
+/**
+ * @brief Allocates a context on the secure side.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] ulSecureStackSize Size of the stack to allocate on secure side.
+ * @param[in] ulIsTaskPrivileged 1 if the calling task is privileged, 0 otherwise.
+ *
+ * @return Opaque context handle if context is successfully allocated,
+ * securecontextINVALID_CONTEXT_ID otherwise.
+ */
+#if ( configENABLE_MPU == 1 )
+ SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ uint32_t ulIsTaskPrivileged,
+ void * pvTaskHandle );
+#else /* configENABLE_MPU */
+ SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ void * pvTaskHandle );
+#endif /* configENABLE_MPU */
+
+/**
+ * @brief Frees the given context.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] xSecureContextHandle Context handle corresponding to the
+ * context to be freed.
+ */
+void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle );
+
+/**
+ * @brief Loads the given context.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] xSecureContextHandle Context handle corresponding to the context
+ * to be loaded.
+ */
+void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle );
+
+/**
+ * @brief Saves the given context.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] xSecureContextHandle Context handle corresponding to the context
+ * to be saved.
+ */
+void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle );
+
+#endif /* __SECURE_CONTEXT_H__ */
diff --git a/Source/portable/GCC/ARM_CM85/secure/secure_context_port.c b/Source/portable/GCC/ARM_CM85/secure/secure_context_port.c
new file mode 100644
index 0000000..d70822c
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/secure/secure_context_port.c
@@ -0,0 +1,97 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Secure context includes. */
+#include "secure_context.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ) __attribute__( ( naked ) );
+void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ) __attribute__( ( naked ) );
+
+void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext )
+{
+ /* pxSecureContext value is in r0. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r1, ipsr \n" /* r1 = IPSR. */
+ " cbz r1, load_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */
+ " ldmia r0!, {r1, r2} \n" /* r1 = pxSecureContext->pucCurrentStackPointer, r2 = pxSecureContext->pucStackLimit. */
+ " \n"
+ #if ( configENABLE_MPU == 1 )
+ " ldmia r1!, {r3} \n" /* Read CONTROL register value from task's stack. r3 = CONTROL. */
+ " msr control, r3 \n" /* CONTROL = r3. */
+ #endif /* configENABLE_MPU */
+ " \n"
+ " msr psplim, r2 \n" /* PSPLIM = r2. */
+ " msr psp, r1 \n" /* PSP = r1. */
+ " \n"
+ " load_ctx_therad_mode: \n"
+ " bx lr \n"
+ " \n"
+ ::: "r0", "r1", "r2"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext )
+{
+ /* pxSecureContext value is in r0. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r1, ipsr \n" /* r1 = IPSR. */
+ " cbz r1, save_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */
+ " mrs r1, psp \n" /* r1 = PSP. */
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " vstmdb r1!, {s0} \n" /* Trigger the deferred stacking of FPU registers. */
+ " vldmia r1!, {s0} \n" /* Nullify the effect of the previous statement. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ #if ( configENABLE_MPU == 1 )
+ " mrs r2, control \n" /* r2 = CONTROL. */
+ " stmdb r1!, {r2} \n" /* Store CONTROL value on the stack. */
+ #endif /* configENABLE_MPU */
+ " \n"
+ " str r1, [r0] \n" /* Save the top of stack in context. pxSecureContext->pucCurrentStackPointer = r1. */
+ " movs r1, %0 \n" /* r1 = securecontextNO_STACK. */
+ " msr psplim, r1 \n" /* PSPLIM = securecontextNO_STACK. */
+ " msr psp, r1 \n" /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */
+ " \n"
+ " save_ctx_therad_mode: \n"
+ " bx lr \n"
+ " \n"
+ ::"i" ( securecontextNO_STACK ) : "r1", "memory"
+ );
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM85/secure/secure_heap.c b/Source/portable/GCC/ARM_CM85/secure/secure_heap.c
new file mode 100644
index 0000000..19f7c23
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/secure/secure_heap.c
@@ -0,0 +1,454 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Secure context heap includes. */
+#include "secure_heap.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief Total heap size.
+ */
+#ifndef secureconfigTOTAL_HEAP_SIZE
+ #define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) )
+#endif
+
+/* No test marker by default. */
+#ifndef mtCOVERAGE_TEST_MARKER
+ #define mtCOVERAGE_TEST_MARKER()
+#endif
+
+/* No tracing by default. */
+#ifndef traceMALLOC
+ #define traceMALLOC( pvReturn, xWantedSize )
+#endif
+
+/* No tracing by default. */
+#ifndef traceFREE
+ #define traceFREE( pv, xBlockSize )
+#endif
+
+/* Block sizes must not get too small. */
+#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) )
+
+/* Assumes 8bit bytes! */
+#define secureheapBITS_PER_BYTE ( ( size_t ) 8 )
+/*-----------------------------------------------------------*/
+
+/* Allocate the memory for the heap. */
+#if ( configAPPLICATION_ALLOCATED_HEAP == 1 )
+
+/* The application writer has already defined the array used for the RTOS
+* heap - probably so it can be placed in a special segment or address. */
+ extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+#else /* configAPPLICATION_ALLOCATED_HEAP */
+ static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+#endif /* configAPPLICATION_ALLOCATED_HEAP */
+
+/**
+ * @brief The linked list structure.
+ *
+ * This is used to link free blocks in order of their memory address.
+ */
+typedef struct A_BLOCK_LINK
+{
+ struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */
+ size_t xBlockSize; /**< The size of the free block. */
+} BlockLink_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Called automatically to setup the required heap structures the first
+ * time pvPortMalloc() is called.
+ */
+static void prvHeapInit( void );
+
+/**
+ * @brief Inserts a block of memory that is being freed into the correct
+ * position in the list of free memory blocks.
+ *
+ * The block being freed will be merged with the block in front of it and/or the
+ * block behind it if the memory blocks are adjacent to each other.
+ *
+ * @param[in] pxBlockToInsert The block being freed.
+ */
+static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert );
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The size of the structure placed at the beginning of each allocated
+ * memory block must be correctly byte aligned.
+ */
+static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( secureportBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+
+/**
+ * @brief Create a couple of list links to mark the start and end of the list.
+ */
+static BlockLink_t xStart;
+static BlockLink_t * pxEnd = NULL;
+
+/**
+ * @brief Keeps track of the number of free bytes remaining, but says nothing
+ * about fragmentation.
+ */
+static size_t xFreeBytesRemaining = 0U;
+static size_t xMinimumEverFreeBytesRemaining = 0U;
+
+/**
+ * @brief Gets set to the top bit of a size_t type.
+ *
+ * When this bit in the xBlockSize member of a BlockLink_t structure is set
+ * then the block belongs to the application. When the bit is free the block is
+ * still part of the free heap space.
+ */
+static size_t xBlockAllocatedBit = 0;
+/*-----------------------------------------------------------*/
+
+static void prvHeapInit( void )
+{
+ BlockLink_t * pxFirstFreeBlock;
+ uint8_t * pucAlignedHeap;
+ size_t uxAddress;
+ size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE;
+
+ /* Ensure the heap starts on a correctly aligned boundary. */
+ uxAddress = ( size_t ) ucHeap;
+
+ if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 )
+ {
+ uxAddress += ( secureportBYTE_ALIGNMENT - 1 );
+ uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+ xTotalHeapSize -= uxAddress - ( size_t ) ucHeap;
+ }
+
+ pucAlignedHeap = ( uint8_t * ) uxAddress;
+
+ /* xStart is used to hold a pointer to the first item in the list of free
+ * blocks. The void cast is used to prevent compiler warnings. */
+ xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
+ xStart.xBlockSize = ( size_t ) 0;
+
+ /* pxEnd is used to mark the end of the list of free blocks and is inserted
+ * at the end of the heap space. */
+ uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize;
+ uxAddress -= xHeapStructSize;
+ uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+ pxEnd = ( void * ) uxAddress;
+ pxEnd->xBlockSize = 0;
+ pxEnd->pxNextFreeBlock = NULL;
+
+ /* To start with there is a single free block that is sized to take up the
+ * entire heap space, minus the space taken by pxEnd. */
+ pxFirstFreeBlock = ( void * ) pucAlignedHeap;
+ pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock;
+ pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
+
+ /* Only one block exists - and it covers the entire usable heap space. */
+ xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+ xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+
+ /* Work out the position of the top bit in a size_t variable. */
+ xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 );
+}
+/*-----------------------------------------------------------*/
+
+static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert )
+{
+ BlockLink_t * pxIterator;
+ uint8_t * puc;
+
+ /* Iterate through the list until a block is found that has a higher address
+ * than the block being inserted. */
+ for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
+ {
+ /* Nothing to do here, just iterate to the right position. */
+ }
+
+ /* Do the block being inserted, and the block it is being inserted after
+ * make a contiguous block of memory? */
+ puc = ( uint8_t * ) pxIterator;
+
+ if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
+ {
+ pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
+ pxBlockToInsert = pxIterator;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* Do the block being inserted, and the block it is being inserted before
+ * make a contiguous block of memory? */
+ puc = ( uint8_t * ) pxBlockToInsert;
+
+ if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
+ {
+ if( pxIterator->pxNextFreeBlock != pxEnd )
+ {
+ /* Form one big block from the two blocks. */
+ pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
+ pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
+ }
+ else
+ {
+ pxBlockToInsert->pxNextFreeBlock = pxEnd;
+ }
+ }
+ else
+ {
+ pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
+ }
+
+ /* If the block being inserted plugged a gap, so was merged with the block
+ * before and the block after, then its pxNextFreeBlock pointer will have
+ * already been set, and should not be set here as that would make it point
+ * to itself. */
+ if( pxIterator != pxBlockToInsert )
+ {
+ pxIterator->pxNextFreeBlock = pxBlockToInsert;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+}
+/*-----------------------------------------------------------*/
+
+void * pvPortMalloc( size_t xWantedSize )
+{
+ BlockLink_t * pxBlock;
+ BlockLink_t * pxPreviousBlock;
+ BlockLink_t * pxNewBlockLink;
+ void * pvReturn = NULL;
+
+ /* If this is the first call to malloc then the heap will require
+ * initialisation to setup the list of free blocks. */
+ if( pxEnd == NULL )
+ {
+ prvHeapInit();
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* Check the requested block size is not so large that the top bit is set.
+ * The top bit of the block size member of the BlockLink_t structure is used
+ * to determine who owns the block - the application or the kernel, so it
+ * must be free. */
+ if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
+ {
+ /* The wanted size is increased so it can contain a BlockLink_t
+ * structure in addition to the requested amount of bytes. */
+ if( xWantedSize > 0 )
+ {
+ xWantedSize += xHeapStructSize;
+
+ /* Ensure that blocks are always aligned to the required number of
+ * bytes. */
+ if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 )
+ {
+ /* Byte alignment required. */
+ xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) );
+ secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 );
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
+ {
+ /* Traverse the list from the start (lowest address) block until
+ * one of adequate size is found. */
+ pxPreviousBlock = &xStart;
+ pxBlock = xStart.pxNextFreeBlock;
+
+ while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
+ {
+ pxPreviousBlock = pxBlock;
+ pxBlock = pxBlock->pxNextFreeBlock;
+ }
+
+ /* If the end marker was reached then a block of adequate size was
+ * not found. */
+ if( pxBlock != pxEnd )
+ {
+ /* Return the memory space pointed to - jumping over the
+ * BlockLink_t structure at its start. */
+ pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize );
+
+ /* This block is being returned for use so must be taken out
+ * of the list of free blocks. */
+ pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
+
+ /* If the block is larger than required it can be split into
+ * two. */
+ if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE )
+ {
+ /* This block is to be split into two. Create a new
+ * block following the number of bytes requested. The void
+ * cast is used to prevent byte alignment warnings from the
+ * compiler. */
+ pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
+ secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 );
+
+ /* Calculate the sizes of two blocks split from the single
+ * block. */
+ pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
+ pxBlock->xBlockSize = xWantedSize;
+
+ /* Insert the new block into the list of free blocks. */
+ prvInsertBlockIntoFreeList( pxNewBlockLink );
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ xFreeBytesRemaining -= pxBlock->xBlockSize;
+
+ if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
+ {
+ xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* The block is being returned - it is allocated and owned by
+ * the application and has no "next" block. */
+ pxBlock->xBlockSize |= xBlockAllocatedBit;
+ pxBlock->pxNextFreeBlock = NULL;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ traceMALLOC( pvReturn, xWantedSize );
+
+ #if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 )
+ {
+ if( pvReturn == NULL )
+ {
+ extern void vApplicationMallocFailedHook( void );
+ vApplicationMallocFailedHook();
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) */
+
+ secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 );
+ return pvReturn;
+}
+/*-----------------------------------------------------------*/
+
+void vPortFree( void * pv )
+{
+ uint8_t * puc = ( uint8_t * ) pv;
+ BlockLink_t * pxLink;
+
+ if( pv != NULL )
+ {
+        /* The memory being freed will have a BlockLink_t structure immediately
+         * before it. */
+ puc -= xHeapStructSize;
+
+ /* This casting is to keep the compiler from issuing warnings. */
+ pxLink = ( void * ) puc;
+
+ /* Check the block is actually allocated. */
+ secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
+ secureportASSERT( pxLink->pxNextFreeBlock == NULL );
+
+ if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
+ {
+ if( pxLink->pxNextFreeBlock == NULL )
+ {
+ /* The block is being returned to the heap - it is no longer
+ * allocated. */
+ pxLink->xBlockSize &= ~xBlockAllocatedBit;
+
+ secureportDISABLE_NON_SECURE_INTERRUPTS();
+ {
+ /* Add this block to the list of free blocks. */
+ xFreeBytesRemaining += pxLink->xBlockSize;
+ traceFREE( pv, pxLink->xBlockSize );
+ prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
+ }
+ secureportENABLE_NON_SECURE_INTERRUPTS();
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+size_t xPortGetFreeHeapSize( void )
+{
+ return xFreeBytesRemaining;
+}
+/*-----------------------------------------------------------*/
+
+size_t xPortGetMinimumEverFreeHeapSize( void )
+{
+ return xMinimumEverFreeBytesRemaining;
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM85/secure/secure_heap.h b/Source/portable/GCC/ARM_CM85/secure/secure_heap.h
new file mode 100644
index 0000000..75c9cb0
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/secure/secure_heap.h
@@ -0,0 +1,66 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_HEAP_H__
+#define __SECURE_HEAP_H__
+
+/* Standard includes. */
+#include <stdlib.h>
+
+/**
+ * @brief Allocates memory from heap.
+ *
+ * @param[in] xWantedSize The size of the memory to be allocated.
+ *
+ * @return Pointer to the memory region if the allocation is successful, NULL
+ * otherwise.
+ */
+void * pvPortMalloc( size_t xWantedSize );
+
+/**
+ * @brief Frees the previously allocated memory.
+ *
+ * @param[in] pv Pointer to the memory to be freed.
+ */
+void vPortFree( void * pv );
+
+/**
+ * @brief Get the free heap size.
+ *
+ * @return Free heap size.
+ */
+size_t xPortGetFreeHeapSize( void );
+
+/**
+ * @brief Get the minimum ever free heap size.
+ *
+ * @return Minimum ever free heap size.
+ */
+size_t xPortGetMinimumEverFreeHeapSize( void );
+
+#endif /* __SECURE_HEAP_H__ */
diff --git a/Source/portable/GCC/ARM_CM85/secure/secure_init.c b/Source/portable/GCC/ARM_CM85/secure/secure_init.c
new file mode 100644
index 0000000..f93bfce
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/secure/secure_init.c
@@ -0,0 +1,106 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Secure init includes. */
+#include "secure_init.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */
+#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL )
+#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS )
+#define secureinitSCB_AIRCR_PRIS_POS ( 14UL )
+#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS )
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define secureinitFPCCR_LSPENS_POS ( 29UL )
+#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS )
+#define secureinitFPCCR_TS_POS ( 26UL )
+#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS )
+
+#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */
+#define secureinitNSACR_CP10_POS ( 10UL )
+#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS )
+#define secureinitNSACR_CP11_POS ( 11UL )
+#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS )
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void )
+{
+ uint32_t ulIPSR;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ulIPSR != 0 )
+ {
+ *( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) |
+ ( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) |
+ ( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK );
+ }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void )
+{
+ uint32_t ulIPSR;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ulIPSR != 0 )
+ {
+ /* CP10 = 1 ==> Non-secure access to the Floating Point Unit is
+ * permitted. CP11 should be programmed to the same value as CP10. */
+ *( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK );
+
+        /* LSPENS = 0 ==> LSPEN is writable from non-secure state. This ensures
+         * that we can enable/disable lazy stacking in port.c file. */
+ *( secureinitFPCCR ) &= ~( secureinitFPCCR_LSPENS_MASK );
+
+ /* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP
+ * registers (S16-S31) are also pushed to stack on exception entry and
+ * restored on exception return. */
+ *( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK );
+ }
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM85/secure/secure_init.h b/Source/portable/GCC/ARM_CM85/secure/secure_init.h
new file mode 100644
index 0000000..e6c9da0
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/secure/secure_init.h
@@ -0,0 +1,54 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_INIT_H__
+#define __SECURE_INIT_H__
+
+/**
+ * @brief De-prioritizes the non-secure exceptions.
+ *
+ * This is needed to ensure that the non-secure PendSV runs at the lowest
+ * priority. Context switch is done in the non-secure PendSV handler.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureInit_DePrioritizeNSExceptions( void );
+
+/**
+ * @brief Sets up the Floating Point Unit (FPU) for Non-Secure access.
+ *
+ * Also sets FPCCR.TS=1 to ensure that the content of the Floating Point
+ * Registers are not leaked to the non-secure side.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureInit_EnableNSFPUAccess( void );
+
+#endif /* __SECURE_INIT_H__ */
diff --git a/Source/portable/GCC/ARM_CM85/secure/secure_port_macros.h b/Source/portable/GCC/ARM_CM85/secure/secure_port_macros.h
new file mode 100644
index 0000000..d7ac583
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85/secure/secure_port_macros.h
@@ -0,0 +1,140 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_PORT_MACROS_H__
+#define __SECURE_PORT_MACROS_H__
+
+/**
+ * @brief Byte alignment requirements.
+ */
+#define secureportBYTE_ALIGNMENT 8
+#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 )
+
+/**
+ * @brief Macro to declare a function as non-secure callable.
+ */
+#if defined( __IAR_SYSTEMS_ICC__ )
+ #define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry __root
+#else
+ #define secureportNON_SECURE_CALLABLE __attribute__( ( cmse_nonsecure_entry ) ) __attribute__( ( used ) )
+#endif
+
+/**
+ * @brief Set the secure PRIMASK value.
+ */
+#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \
+ __asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" )
+
+/**
+ * @brief Set the non-secure PRIMASK value.
+ */
+#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \
+ __asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" )
+
+/**
+ * @brief Read the PSP value in the given variable.
+ */
+#define secureportREAD_PSP( pucOutCurrentStackPointer ) \
+ __asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) )
+
+/**
+ * @brief Set the PSP to the given value.
+ */
+#define secureportSET_PSP( pucCurrentStackPointer ) \
+ __asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) )
+
+/**
+ * @brief Read the PSPLIM value in the given variable.
+ */
+#define secureportREAD_PSPLIM( pucOutStackLimit ) \
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pucOutStackLimit ) )
+
+/**
+ * @brief Set the PSPLIM to the given value.
+ */
+#define secureportSET_PSPLIM( pucStackLimit ) \
+ __asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) )
+
+/**
+ * @brief Set the NonSecure MSP to the given value.
+ */
+#define secureportSET_MSP_NS( pucMainStackPointer ) \
+ __asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) )
+
+/**
+ * @brief Set the CONTROL register to the given value.
+ */
+#define secureportSET_CONTROL( ulControl ) \
+ __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" )
+
+/**
+ * @brief Read the Interrupt Program Status Register (IPSR) value in the given
+ * variable.
+ */
+#define secureportREAD_IPSR( ulIPSR ) \
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) )
+
+/**
+ * @brief PRIMASK value to enable interrupts.
+ */
+#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0
+
+/**
+ * @brief PRIMASK value to disable interrupts.
+ */
+#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1
+
+/**
+ * @brief Disable secure interrupts.
+ */
+#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Disable non-secure interrupts.
+ *
+ * This effectively disables context switches.
+ */
+#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Enable non-secure interrupts.
+ */
+#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Assert definition.
+ */
+#define secureportASSERT( x ) \
+ if( ( x ) == 0 ) \
+ { \
+ secureportDISABLE_SECURE_INTERRUPTS(); \
+ secureportDISABLE_NON_SECURE_INTERRUPTS(); \
+ for( ; ; ) {; } \
+ }
+
+#endif /* __SECURE_PORT_MACROS_H__ */
diff --git a/Source/portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c b/Source/portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..d247c92
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2106 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+#include "mpu_syscall_numbers.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskDelayUntil ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskAbortDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskPriorityGet ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_eTaskGetState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskGetInfo ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetIdleTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSuspend ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskResume ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ TickType_t MPU_xTaskGetTickCount( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetTickCount ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetNumberOfTasks ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTaskGetName ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimeCounter ) : "memory"
+ );
+ }
+
+    #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimePercent ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGetIdleRunTimeCounter ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetApplicationTaskTag ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer ) : "memory"
+ );
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetSystemState ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTaskGetStackHighWaterMark2 ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetCurrentTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGetSchedulerState ) : "memory"
+ );
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTaskSetTimeOutState ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskCheckForTimeOut ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotify ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyWait ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyTake ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTaskGenericNotifyStateClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_ulTaskGenericNotifyValueClear ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGenericSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueMessagesWaiting ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxQueueSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueuePeek ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSemaphoreTake ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGetMutexHolder ) : "memory"
+ );
+ }
+
+ #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueTakeMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueGiveMutexRecursive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueSelectFromSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xQueueAddToSet ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueAddToRegistry ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vQueueUnregisterQueue ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcQueueGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pvTimerGetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetTimerID ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerIsTimerActive ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xTimerGetTimerDaemonTaskHandleImpl;
+ * else SVC SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle. r0 preserved via push/pop. */
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper with an extra context check: an exception context
+ * (IPSR != 0) or a privileged thread (CONTROL[0]==0) branches straight to
+ * MPU_xTimerGenericCommandPrivImpl; an unprivileged thread traps via
+ * SVC SYSTEM_CALL_xTimerGenericCommand. r0 preserved via push/pop.
+ * NOTE(review): the code after 'svc' falls through into the _Priv label;
+ * this assumes the SVC exit path returns directly to the caller rather
+ * than to the instruction after 'svc' - confirm against the SVC handler. */
+ BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandPrivImpl \n"
+ " \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGenericCommand ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_pcTimerGetNameImpl;
+ * else SVC SYSTEM_CALL_pcTimerGetName. r0 preserved via push/pop. */
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_pcTimerGetName ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_vTimerSetReloadModeImpl;
+ * else SVC SYSTEM_CALL_vTimerSetReloadMode. r0 preserved via push/pop. */
+ void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vTimerSetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xTimerGetReloadModeImpl;
+ * else SVC SYSTEM_CALL_xTimerGetReloadMode. r0 preserved via push/pop. */
+ BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_uxTimerGetReloadModeImpl;
+ * else SVC SYSTEM_CALL_uxTimerGetReloadMode. r0 preserved via push/pop. */
+ UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxTimerGetReloadMode ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xTimerGetPeriodImpl;
+ * else SVC SYSTEM_CALL_xTimerGetPeriod. r0 preserved via push/pop. */
+ TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetPeriod ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xTimerGetExpiryTimeImpl;
+ * else SVC SYSTEM_CALL_xTimerGetExpiryTime. r0 preserved via push/pop. */
+ TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xTimerGetExpiryTime ) : "memory"
+ );
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xEventGroupWaitBitsImpl;
+ * else SVC SYSTEM_CALL_xEventGroupWaitBits. Parameters are packed in
+ * pxParams (r0), which is preserved around the privilege test. */
+ EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupWaitBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xEventGroupClearBitsImpl;
+ * else SVC SYSTEM_CALL_xEventGroupClearBits. r0 preserved via push/pop. */
+ EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupClearBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xEventGroupSetBitsImpl;
+ * else SVC SYSTEM_CALL_xEventGroupSetBits. r0 preserved via push/pop. */
+ EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSetBits ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xEventGroupSyncImpl;
+ * else SVC SYSTEM_CALL_xEventGroupSync. r0 preserved via push/pop. */
+ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xEventGroupSync ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_uxEventGroupGetNumberImpl;
+ * else SVC SYSTEM_CALL_uxEventGroupGetNumber. r0 preserved via push/pop. */
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_uxEventGroupGetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_vEventGroupSetNumberImpl;
+ * else SVC SYSTEM_CALL_vEventGroupSetNumber. r0 preserved via push/pop. */
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_vEventGroupSetNumber ) : "memory"
+ );
+ }
+
+ #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xStreamBufferSendImpl;
+ * else SVC SYSTEM_CALL_xStreamBufferSend. r0 preserved via push/pop. */
+ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSend ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xStreamBufferReceiveImpl;
+ * else SVC SYSTEM_CALL_xStreamBufferReceive. r0 preserved via push/pop. */
+ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferReceive ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xStreamBufferIsFullImpl;
+ * else SVC SYSTEM_CALL_xStreamBufferIsFull. r0 preserved via push/pop. */
+ BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsFull ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xStreamBufferIsEmptyImpl;
+ * else SVC SYSTEM_CALL_xStreamBufferIsEmpty. r0 preserved via push/pop. */
+ BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferIsEmpty ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xStreamBufferSpacesAvailableImpl;
+ * else SVC SYSTEM_CALL_xStreamBufferSpacesAvailable. r0 preserved via push/pop. */
+ size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSpacesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xStreamBufferBytesAvailableImpl;
+ * else SVC SYSTEM_CALL_xStreamBufferBytesAvailable. r0 preserved via push/pop. */
+ size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferBytesAvailable ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xStreamBufferSetTriggerLevelImpl;
+ * else SVC SYSTEM_CALL_xStreamBufferSetTriggerLevel. r0 preserved via push/pop. */
+ BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferSetTriggerLevel ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;
+
+ /* Naked MPU wrapper: CONTROL[0]==0 (privileged) -> b MPU_xStreamBufferNextMessageLengthBytesImpl;
+ * else SVC SYSTEM_CALL_xStreamBufferNextMessageLengthBytes. r0 preserved via push/pop. */
+ size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " \n"
+ : : "i" ( SYSTEM_CALL_xStreamBufferNextMessageLengthBytes ) : "memory"
+ );
+ }
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/Source/portable/GCC/ARM_CM85_NTZ/non_secure/port.c b/Source/portable/GCC/ARM_CM85_NTZ/non_secure/port.c
new file mode 100644
index 0000000..9712ac3
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85_NTZ/non_secure/port.c
@@ -0,0 +1,2043 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+/* MPU includes. */
+#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
+
+/* Portasm includes. */
+#include "portasm.h"
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ /* Secure components includes. */
+ #include "secure_context.h"
+ #include "secure_init.h"
+#endif /* configENABLE_TRUSTZONE */
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/**
+ * The FreeRTOS Cortex M85 port can be configured to run on the Secure Side only
+ * i.e. the processor boots as secure and never jumps to the non-secure side.
+ * The Trust Zone support in the port must be disabled in order to run FreeRTOS
+ * on the secure side. The following are the valid configuration settings:
+ *
+ * 1. Run FreeRTOS on the Secure Side:
+ * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
+ *
+ * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
+ *
+ * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
+ */
+#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
+ #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the NVIC.
+ */
+#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
+#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
+#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
+#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
+#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
+#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
+#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
+#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
+#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
+#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
+#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 )
+#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
+#define portCPACR_CP10_VALUE ( 3UL )
+#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE
+#define portCPACR_CP10_POS ( 20UL )
+#define portCPACR_CP11_POS ( 22UL )
+
+#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define portFPCCR_ASPEN_POS ( 31UL )
+#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS )
+#define portFPCCR_LSPEN_POS ( 30UL )
+#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the MPU.
+ */
+#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
+#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
+#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
+
+#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
+#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
+
+#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) )
+#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) )
+
+#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) )
+#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) )
+
+#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) )
+#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) )
+
+#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) )
+#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) )
+
+#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
+#define portMPU_MAIR_ATTR0_POS ( 0UL )
+#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR1_POS ( 8UL )
+#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR2_POS ( 16UL )
+#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR3_POS ( 24UL )
+#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 )
+
+#define portMPU_MAIR_ATTR4_POS ( 0UL )
+#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR5_POS ( 8UL )
+#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR6_POS ( 16UL )
+#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR7_POS ( 24UL )
+#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 )
+
+#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL )
+
+#define portMPU_RLAR_REGION_ENABLE ( 1UL )
+
+/* Enable privileged access to unmapped region. */
+#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL )
+
+/* Enable MPU. */
+#define portMPU_ENABLE_BIT ( 1UL << 0UL )
+
+/* Expected value of the portMPU_TYPE register. */
+#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The maximum 24-bit number.
+ *
+ * It is needed because the systick is a 24-bit counter.
+ */
+#define portMAX_24_BIT_NUMBER ( 0xffffffUL )
+
+/**
+ * @brief A fiddle factor to estimate the number of SysTick counts that would
+ * have occurred while the SysTick counter is stopped during tickless idle
+ * calculations.
+ */
+#define portMISSED_COUNTS_FACTOR ( 94UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to set up the initial stack.
+ */
+#define portINITIAL_XPSR ( 0x01000000 )
+
+#if ( configRUN_FREERTOS_SECURE_ONLY == 1 )
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF FD
+ * 1111 1111 1111 1111 1111 1111 1111 1101
+ *
+ * Bit[6] - 1 --> The exception was taken from the Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 1 --> The exception was taken to the Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xfffffffd )
+#else
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF BC
+ * 1111 1111 1111 1111 1111 1111 1011 1100
+ *
+ * Bit[6] - 0 --> The exception was taken from the Non-Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 0 --> The exception was taken to the Non-Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xffffffbc )
+#endif /* configRUN_FREERTOS_SECURE_ONLY */
+
+/**
+ * @brief CONTROL register privileged bit mask.
+ *
+ * Bit[0] in CONTROL register tells the privilege:
+ * Bit[0] = 0 ==> The task is privileged.
+ * Bit[0] = 1 ==> The task is not privileged.
+ */
+#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL )
+
+/**
+ * @brief Initial CONTROL register values.
+ */
+#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 )
+#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 )
+
+/**
+ * @brief Let the user override the default SysTick clock rate. If defined by the
+ * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
+ * configuration register.
+ */
+#ifndef configSYSTICK_CLOCK_HZ
+ #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ )
+ /* Ensure the SysTick is clocked at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT )
+#else
+ /* Select the option to clock SysTick not at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 )
+#endif
+
+/**
+ * @brief Let the user override the pre-loading of the initial LR with the
+ * address of prvTaskExitError() in case it messes up unwinding of the stack
+ * in the debugger.
+ */
+#ifdef configTASK_RETURN_ADDRESS
+ #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
+#else
+ #define portTASK_RETURN_ADDRESS prvTaskExitError
+#endif
+
+/**
+ * @brief If portPRELOAD_REGISTERS then registers will be given an initial value
+ * when a task is created. This helps in debugging at the cost of code size.
+ */
+#define portPRELOAD_REGISTERS 1
+
+/**
+ * @brief A task is created without a secure context, and must call
+ * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes
+ * any secure calls.
+ */
+#define portNO_SECURE_CONTEXT 0
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Used to catch tasks that attempt to return from their implementing
+ * function.
+ */
+static void prvTaskExitError( void );
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Setup the Memory Protection Unit (MPU).
+ */
+ static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_FPU == 1 )
+
+/**
+ * @brief Setup the Floating Point Unit (FPU).
+ */
+ static void prvSetupFPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_FPU */
+
+/**
+ * @brief Setup the timer to generate the tick interrupts.
+ *
+ * The implementation in this file is weak to allow application writers to
+ * change the timer used to generate the tick interrupt.
+ */
+void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether the current execution context is interrupt.
+ *
+ * @return pdTRUE if the current execution context is interrupt, pdFALSE
+ * otherwise.
+ */
+BaseType_t xPortIsInsideInterrupt( void );
+
+/**
+ * @brief Yield the processor.
+ */
+void vPortYield( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enter critical section.
+ */
+void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Exit from critical section.
+ */
+void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SysTick handler.
+ */
+void SysTick_Handler( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief C part of SVC handler.
+ */
+portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
+/**
+ * @brief Each task maintains its own interrupt status in the critical nesting
+ * variable.
+ */
+PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Saved as part of the task context to indicate which context the
+ * task is using on the secure side.
+ */
+ PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
+#endif /* configENABLE_TRUSTZONE */
+
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+
+/**
+ * @brief The number of SysTick increments that make up one tick period.
+ */
+ PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;
+
+/**
+ * @brief The maximum number of tick periods that can be suppressed is
+ * limited by the 24 bit resolution of the SysTick timer.
+ */
+ PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;
+
+/**
+ * @brief Compensate for the CPU cycles that pass while the SysTick is
+ * stopped (low power functionality only).
+ */
+ PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+ __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
+ {
+ uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
+ TickType_t xModifiableIdleTime;
+
+ /* Make sure the SysTick reload value does not overflow the counter. */
+ if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
+ {
+ xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
+ }
+
+ /* Enter a critical section but don't use the taskENTER_CRITICAL()
+ * method as that will mask interrupts that should exit sleep mode. */
+ __asm volatile ( "cpsid i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* If a context switch is pending or a task is waiting for the scheduler
+ * to be unsuspended then abandon the low power entry. */
+ if( eTaskConfirmSleepModeStatus() == eAbortSleep )
+ {
+ /* Re-enable interrupts - see comments above the cpsid instruction
+ * above. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ }
+ else
+ {
+ /* Stop the SysTick momentarily. The time the SysTick is stopped for
+ * is accounted for as best it can be, but using the tickless mode will
+ * inevitably result in some tiny drift of the time maintained by the
+ * kernel with respect to calendar time. */
+ portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+ /* Use the SysTick current-value register to determine the number of
+ * SysTick decrements remaining until the next tick interrupt. If the
+ * current-value register is zero, then there are actually
+ * ulTimerCountsForOneTick decrements remaining, not zero, because the
+ * SysTick requests the interrupt when decrementing from 1 to 0. */
+ ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+
+ if( ulSysTickDecrementsLeft == 0 )
+ {
+ ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
+ }
+
+ /* Calculate the reload value required to wait xExpectedIdleTime
+ * tick periods. -1 is used because this code normally executes part
+ * way through the first tick period. But if the SysTick IRQ is now
+ * pending, then clear the IRQ, suppressing the first tick, and correct
+ * the reload value to reflect that the second tick period is already
+ * underway. The expected idle time is always at least two ticks. */
+ ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
+
+ if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
+ {
+ portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
+ ulReloadValue -= ulTimerCountsForOneTick;
+ }
+
+ if( ulReloadValue > ulStoppedTimerCompensation )
+ {
+ ulReloadValue -= ulStoppedTimerCompensation;
+ }
+
+ /* Set the new reload value. */
+ portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
+
+ /* Clear the SysTick count flag and set the count value back to
+ * zero. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+ /* Restart SysTick. */
+ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
+
+ /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
+ * set its parameter to 0 to indicate that its implementation contains
+ * its own wait for interrupt or wait for event instruction, and so wfi
+ * should not be executed again. However, the original expected idle
+ * time variable must remain unmodified, so a copy is taken. */
+ xModifiableIdleTime = xExpectedIdleTime;
+ configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
+
+ if( xModifiableIdleTime > 0 )
+ {
+ __asm volatile ( "dsb" ::: "memory" );
+ __asm volatile ( "wfi" );
+ __asm volatile ( "isb" );
+ }
+
+ configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
+
+ /* Re-enable interrupts to allow the interrupt that brought the MCU
+ * out of sleep mode to execute immediately. See comments above
+ * the cpsid instruction above. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* Disable interrupts again because the clock is about to be stopped
+ * and interrupts that execute while the clock is stopped will increase
+ * any slippage between the time maintained by the RTOS and calendar
+ * time. */
+ __asm volatile ( "cpsid i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* Disable the SysTick clock without reading the
+ * portNVIC_SYSTICK_CTRL_REG register to ensure the
+ * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
+ * the time the SysTick is stopped for is accounted for as best it can
+ * be, but using the tickless mode will inevitably result in some tiny
+ * drift of the time maintained by the kernel with respect to calendar
+             * time. */
+ portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+ /* Determine whether the SysTick has already counted to zero. */
+ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+ {
+ uint32_t ulCalculatedLoadValue;
+
+ /* The tick interrupt ended the sleep (or is now pending), and
+ * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG
+ * with whatever remains of the new tick period. */
+ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
+
+ /* Don't allow a tiny value, or values that have somehow
+ * underflowed because the post sleep hook did something
+ * that took too long or because the SysTick current-value register
+ * is zero. */
+ if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
+ {
+ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
+ }
+
+ portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
+
+ /* As the pending tick will be processed as soon as this
+ * function exits, the tick value maintained by the tick is stepped
+ * forward by one less than the time spent waiting. */
+ ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
+ }
+ else
+ {
+ /* Something other than the tick interrupt ended the sleep. */
+
+ /* Use the SysTick current-value register to determine the
+ * number of SysTick decrements remaining until the expected idle
+ * time would have ended. */
+ ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+ #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
+ {
+ /* If the SysTick is not using the core clock, the current-
+ * value register might still be zero here. In that case, the
+ * SysTick didn't load from the reload register, and there are
+ * ulReloadValue decrements remaining in the expected idle
+ * time, not zero. */
+ if( ulSysTickDecrementsLeft == 0 )
+ {
+ ulSysTickDecrementsLeft = ulReloadValue;
+ }
+ }
+ #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+ /* Work out how long the sleep lasted rounded to complete tick
+ * periods (not the ulReload value which accounted for part
+ * ticks). */
+ ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
+
+ /* How many complete tick periods passed while the processor
+ * was waiting? */
+ ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
+
+ /* The reload value is set to whatever fraction of a single tick
+ * period remains. */
+ portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
+ }
+
+ /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
+ * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If
+ * the SysTick is not using the core clock, temporarily configure it to
+ * use the core clock. This configuration forces the SysTick to load
+ * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
+ * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready
+ * to receive the standard value immediately. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+ #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
+ {
+ portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+ }
+ #else
+ {
+ /* The temporary usage of the core clock has served its purpose,
+ * as described above. Resume usage of the other clock. */
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;
+
+ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+ {
+ /* The partial tick period already ended. Be sure the SysTick
+ * counts it only once. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
+ }
+
+ portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+ }
+ #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+ /* Step the tick to account for any tick periods that elapsed. */
+ vTaskStepTick( ulCompleteTickPeriods );
+
+ /* Exit with interrupts enabled. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ }
+ }
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
+{
+ /* Calculate the constants required to configure the tick interrupt. */
+ #if ( configUSE_TICKLESS_IDLE == 1 )
+ {
+ ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
+ xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
+ ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
+ }
+ #endif /* configUSE_TICKLESS_IDLE */
+
+ /* Stop and reset the SysTick. */
+ portNVIC_SYSTICK_CTRL_REG = 0UL;
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+ /* Configure SysTick to interrupt at the requested rate. */
+ portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+}
+/*-----------------------------------------------------------*/
+
+static void prvTaskExitError( void )
+{
+ volatile uint32_t ulDummy = 0UL;
+
+ /* A function that implements a task must not exit or attempt to return to
+ * its caller as there is nothing to return to. If a task wants to exit it
+ * should instead call vTaskDelete( NULL ). Artificially force an assert()
+ * to be triggered if configASSERT() is defined, then stop here so
+ * application writers can catch the error. */
+ configASSERT( ulCriticalNesting == ~0UL );
+ portDISABLE_INTERRUPTS();
+
+ while( ulDummy == 0 )
+ {
+ /* This file calls prvTaskExitError() after the scheduler has been
+ * started to remove a compiler warning about the function being
+ * defined but never called. ulDummy is used purely to quieten other
+ * warnings about code appearing after this function is called - making
+ * ulDummy volatile makes the compiler think the function could return
+ * and therefore not output an 'unreachable code' warning for code that
+ * appears after it. */
+ }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+ static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
+ {
+ #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ extern uint32_t * __unprivileged_flash_start__;
+ extern uint32_t * __unprivileged_flash_end__;
+ extern uint32_t * __privileged_sram_start__;
+ extern uint32_t * __privileged_sram_end__;
+ #else /* if defined( __ARMCC_VERSION ) */
+            /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ extern uint32_t __unprivileged_flash_start__[];
+ extern uint32_t __unprivileged_flash_end__[];
+ extern uint32_t __privileged_sram_start__[];
+ extern uint32_t __privileged_sram_end__[];
+ #endif /* defined( __ARMCC_VERSION ) */
+
+        /* The only permitted numbers of regions are 8 or 16. */
+ configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) );
+
+ /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */
+ configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );
+
+ /* Check that the MPU is present. */
+ if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
+ {
+ /* MAIR0 - Index 0. */
+ portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+ /* MAIR0 - Index 1. */
+ portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+ /* Setup privileged flash as Read Only so that privileged tasks can
+ * read it but not modify. */
+ portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION;
+ portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+ ( portMPU_REGION_NON_SHAREABLE ) |
+ ( portMPU_REGION_PRIVILEGED_READ_ONLY );
+ portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+ ( portMPU_RLAR_ATTR_INDEX0 ) |
+ ( portMPU_RLAR_REGION_ENABLE );
+
+ /* Setup unprivileged flash as Read Only by both privileged and
+ * unprivileged tasks. All tasks can read it but no-one can modify. */
+ portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION;
+ portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+ ( portMPU_REGION_NON_SHAREABLE ) |
+ ( portMPU_REGION_READ_ONLY );
+ portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+ ( portMPU_RLAR_ATTR_INDEX0 ) |
+ ( portMPU_RLAR_REGION_ENABLE );
+
+ /* Setup unprivileged syscalls flash as Read Only by both privileged
+ * and unprivileged tasks. All tasks can read it but no-one can modify. */
+ portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION;
+ portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+ ( portMPU_REGION_NON_SHAREABLE ) |
+ ( portMPU_REGION_READ_ONLY );
+ portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+ ( portMPU_RLAR_ATTR_INDEX0 ) |
+ ( portMPU_RLAR_REGION_ENABLE );
+
+ /* Setup RAM containing kernel data for privileged access only. */
+ portMPU_RNR_REG = portPRIVILEGED_RAM_REGION;
+ portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+ ( portMPU_REGION_NON_SHAREABLE ) |
+ ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
+ ( portMPU_REGION_EXECUTE_NEVER );
+ portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+ ( portMPU_RLAR_ATTR_INDEX0 ) |
+ ( portMPU_RLAR_REGION_ENABLE );
+
+ /* Enable mem fault. */
+ portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT;
+
+ /* Enable MPU with privileged background access i.e. unmapped
+ * regions have privileged access. */
+ portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
+ }
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_FPU == 1 )
+ static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */
+ {
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ /* Enable non-secure access to the FPU. */
+ SecureInit_EnableNSFPUAccess();
+ }
+ #endif /* configENABLE_TRUSTZONE */
+
+ /* CP10 = 11 ==> Full access to FPU i.e. both privileged and
+ * unprivileged code should be able to access FPU. CP11 should be
+ * programmed to the same value as CP10. */
+ *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) |
+ ( portCPACR_CP11_VALUE << portCPACR_CP11_POS )
+ );
+
+ /* ASPEN = 1 ==> Hardware should automatically preserve floating point
+ * context on exception entry and restore on exception return.
+ * LSPEN = 1 ==> Enable lazy context save of FP state. */
+ *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK );
+ }
+#endif /* configENABLE_FPU */
+/*-----------------------------------------------------------*/
+
+void vPortYield( void ) /* PRIVILEGED_FUNCTION */
+{
+ /* Set a PendSV to request a context switch. */
+ portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+
+ /* Barriers are normally not required but do ensure the code is
+ * completely within the specified behaviour for the architecture. */
+ __asm volatile ( "dsb" ::: "memory" );
+ __asm volatile ( "isb" );
+}
+/*-----------------------------------------------------------*/
+
+void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+ portDISABLE_INTERRUPTS();
+ ulCriticalNesting++;
+
+ /* Barriers are normally not required but do ensure the code is
+ * completely within the specified behaviour for the architecture. */
+ __asm volatile ( "dsb" ::: "memory" );
+ __asm volatile ( "isb" );
+}
+/*-----------------------------------------------------------*/
+
+void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+ configASSERT( ulCriticalNesting );
+ ulCriticalNesting--;
+
+ if( ulCriticalNesting == 0 )
+ {
+ portENABLE_INTERRUPTS();
+ }
+}
+/*-----------------------------------------------------------*/
+
+void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulPreviousMask;
+
+ ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();
+ {
+ /* Increment the RTOS tick. */
+ if( xTaskIncrementTick() != pdFALSE )
+ {
+ /* Pend a context switch. */
+ portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+ }
+ }
+ portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
+}
+/*-----------------------------------------------------------*/
+
+void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
+{
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+            /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* defined( __ARMCC_VERSION ) */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+ uint32_t ulPC;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ uint32_t ulR0, ulR1;
+ extern TaskHandle_t pxCurrentTCB;
+ #if ( configENABLE_MPU == 1 )
+ uint32_t ulControl, ulIsTaskPrivileged;
+ #endif /* configENABLE_MPU */
+ #endif /* configENABLE_TRUSTZONE */
+ uint8_t ucSVCNumber;
+
+ /* Register are stored on the stack in the following order - R0, R1, R2, R3,
+ * R12, LR, PC, xPSR. */
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
+ ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
+
+ switch( ucSVCNumber )
+ {
+ #if ( configENABLE_TRUSTZONE == 1 )
+ case portSVC_ALLOCATE_SECURE_CONTEXT:
+
+ /* R0 contains the stack size passed as parameter to the
+ * vPortAllocateSecureContext function. */
+ ulR0 = pulCallerStackAddress[ 0 ];
+
+ #if ( configENABLE_MPU == 1 )
+ {
+ /* Read the CONTROL register value. */
+ __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
+
+ /* The task that raised the SVC is privileged if Bit[0]
+ * in the CONTROL register is 0. */
+ ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 );
+
+ /* Allocate and load a context for the secure task. */
+ xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB );
+ }
+ #else /* if ( configENABLE_MPU == 1 ) */
+ {
+ /* Allocate and load a context for the secure task. */
+ xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB );
+ }
+ #endif /* configENABLE_MPU */
+
+ configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID );
+ SecureContext_LoadContext( xSecureContext, pxCurrentTCB );
+ break;
+
+ case portSVC_FREE_SECURE_CONTEXT:
+
+ /* R0 contains TCB being freed and R1 contains the secure
+ * context handle to be freed. */
+ ulR0 = pulCallerStackAddress[ 0 ];
+ ulR1 = pulCallerStackAddress[ 1 ];
+
+ /* Free the secure context. */
+ SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 );
+ break;
+ #endif /* configENABLE_TRUSTZONE */
+
+ case portSVC_START_SCHEDULER:
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ /* De-prioritize the non-secure exceptions so that the
+ * non-secure pendSV runs at the lowest priority. */
+ SecureInit_DePrioritizeNSExceptions();
+
+ /* Initialize the secure context management system. */
+ SecureContext_Init();
+ }
+ #endif /* configENABLE_TRUSTZONE */
+
+ #if ( configENABLE_FPU == 1 )
+ {
+ /* Setup the Floating Point Unit (FPU). */
+ prvSetupFPU();
+ }
+ #endif /* configENABLE_FPU */
+
+ /* Setup the context of the first task so that the first task starts
+ * executing. */
+ vRestoreContextOfFirstTask();
+ break;
+
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
+
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+ default:
+ /* Incorrect SVC call. */
+ configASSERT( pdFALSE );
+ }
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+            /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                        "   vpop {s0}         \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Store the value of the PSPLIM register before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /* Raise the portSVC_SYSTEM_CALL_EXIT SVC to request exit from a system
+     * call. The address of this function is planted into the stacked LR by
+     * the system call entry code, so it executes automatically when the
+     * MPU_<API> implementation returns; the resulting SVC is then handled
+     * by vSystemCallExit, which restores the task stack and drops privilege. */
+    void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+    {
+        __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /* Handler side of system call exit: copies the exception stack frame from
+     * the per-task system call stack back onto the task stack, restores the
+     * caller's return address, PSP, PSPLIM and the stacked xPSR padding bit,
+     * and sets CONTROL.nPRIV so the task resumes unprivileged in thread mode.
+     *
+     * pulSystemCallStack - PSP value at the time of the exit SVC (points at
+     *                      the hardware-stacked frame on the system call stack).
+     * ulLR               - EXC_RETURN value of this exception (used to detect
+     *                      whether an extended (FPU) frame was stacked). */
+    void vSystemCallExit( uint32_t * pulSystemCallStack,
+                          uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+    {
+        extern TaskHandle_t pxCurrentTCB;
+        xMPU_SETTINGS * pxMpuSettings;
+        uint32_t * pulTaskStack;
+        uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_functions_start__;
+            extern uint32_t * __privileged_functions_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __privileged_functions_start__[];
+            extern uint32_t __privileged_functions_end__[];
+        #endif /* #if defined( __ARMCC_VERSION ) */
+
+        ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+        /* Checks:
+         * 1. SVC is raised from the privileged code (i.e. application is not
+         *    raising SVC directly). This SVC is only raised from
+         *    vRequestSystemCallExit which is in the privileged code section.
+         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+         *    this means that we previously entered a system call and the
+         *    application is not attempting to exit without entering a system
+         *    call.
+         */
+        if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+            ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+        {
+            pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+            {
+                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+                {
+                    /* Extended frame i.e. FPU in use. 26 words: the 8 core
+                     * registers plus s0-s15, FPSCR and a reserved word. */
+                    ulStackFrameSize = 26;
+                    __asm volatile
+                    (
+                        " vpush {s0}         \n" /* Trigger lazy stacking. */
+                        " vpop  {s0}         \n" /* Nullify the affect of the above instruction. */
+                        ::: "memory"
+                    );
+                }
+                else
+                {
+                    /* Standard frame i.e. FPU not in use. */
+                    ulStackFrameSize = 8;
+                }
+            }
+            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+            {
+                ulStackFrameSize = 8;
+            }
+            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+            /* Make space on the task stack for the stack frame. */
+            pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+            /* Copy the stack frame. */
+            for( i = 0; i < ulStackFrameSize; i++ )
+            {
+                pulTaskStack[ i ] = pulSystemCallStack[ i ];
+            }
+
+            /* Use the pulTaskStack in thread mode. */
+            __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+            /* Return to the caller of the System Call entry point (i.e. the
+             * caller of the MPU_<API>). */
+            pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+            /* Ensure that LR has a valid value.*/
+            pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+            /* Restore the PSPLIM register to what it was at the time of
+             * system call entry. */
+            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+            /* If the hardware used padding to force the stack pointer
+             * to be double word aligned, set the stacked xPSR bit[9],
+             * otherwise clear it. */
+            if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+            }
+            else
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+            }
+
+            /* This is not NULL only for the duration of the system call. */
+            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+            /* Drop the privilege before returning to the thread mode. */
+            __asm volatile
+            (
+                " mrs r0, control     \n" /* Obtain current control value. */
+                " movs r1, #1         \n" /* r1 = 1. */
+                " orrs r0, r1         \n" /* Set nPRIV bit. */
+                " msr control, r0     \n" /* Write back new control value. */
+                ::: "r0", "r1", "memory"
+            );
+        }
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Return pdTRUE if the calling task was created as a privileged task
+     * (portTASK_IS_PRIVILEGED_FLAG set in its MPU settings), pdFALSE
+     * otherwise. */
+    BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xTaskIsPrivileged = pdFALSE;
+        const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+        if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+        {
+            xTaskIsPrivileged = pdTRUE;
+        }
+
+        return xTaskIsPrivileged;
+    }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Initialise a new task's saved context in xMPUSettings->ulContext:
+     * r4-r11, then the hardware frame values (r0 = pvParameters, r1-r3, r12,
+     * LR, PC = pxCode, xPSR), optionally xSecureContext, then PSP, PSPLIM,
+     * CONTROL (privileged or unprivileged per xRunPrivileged) and EXC_RETURN.
+     * Also initialises the per-task system call stack pointers (V2 wrappers
+     * only). Returns a pointer one past the stored context, which the
+     * context switch code uses to locate the saved registers. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters,
+                                         BaseType_t xRunPrivileged,
+                                         xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulIndex = 0;
+
+        /* Callee-saved registers, preloaded with recognisable debug values. */
+        xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+        ulIndex++;
+
+        /* Registers that the hardware stacks/unstacks on exception entry/exit. */
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+        ulIndex++;
+
+        #if ( configENABLE_TRUSTZONE == 1 )
+        {
+            xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+            ulIndex++;
+        }
+        #endif /* configENABLE_TRUSTZONE */
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+        ulIndex++;
+        if( xRunPrivileged == pdTRUE )
+        {
+            xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        else
+        {
+            xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+        ulIndex++;
+
+        #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+        {
+            /* Ensure that the system call stack is double word aligned. */
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+                                                                                     ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            /* Round the stack limit up so it also lies on an aligned boundary. */
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+                                                                                           ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+                                                                                         ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            /* This is not NULL only for the duration of a system call. */
+            xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+        }
+        #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+        return &( xMPUSettings->ulContext[ ulIndex ] );
+    }
+
+#else /* configENABLE_MPU */
+
+    /* Initialise a new task's stack to look exactly as if the task had been
+     * interrupted: hardware frame on top (xPSR, PC = pxCode, LR,
+     * r12/r3/r2/r1, r0 = pvParameters), then the software-saved registers,
+     * EXC_RETURN, PSPLIM and (with TrustZone) xSecureContext. Returns the
+     * new top of stack, i.e. the task's initial saved stack pointer. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Simulate the stack frame as it would be created by a context switch
+         * interrupt. */
+        #if ( portPRELOAD_REGISTERS == 0 )
+        {
+            pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
+            *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+            pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
+            *pxTopOfStack = portINITIAL_EXC_RETURN;
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #else /* portPRELOAD_REGISTERS */
+        {
+            /* Same layout, but every register slot is written with a
+             * recognisable value to aid debugging. */
+            pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+            pxTopOfStack--;
+            *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #endif /* portPRELOAD_REGISTERS */
+
+        return pxTopOfStack;
+    }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Start the RTOS scheduler: validate the interrupt priority configuration
+ * (when configASSERT is defined and the core has BASEPRI), set PendSV and
+ * SysTick to the lowest priority, optionally program the MPU, start the tick
+ * timer and launch the first task. Does not return unless the scheduler
+ * fails to start. */
+BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+    {
+        volatile uint32_t ulOriginalPriority;
+        volatile uint32_t ulImplementedPrioBits = 0;
+        volatile uint8_t ucMaxPriorityValue;
+
+        /* Determine the maximum priority from which ISR safe FreeRTOS API
+         * functions can be called. ISR safe functions are those that end in
+         * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+         * ensure interrupt entry is as fast and simple as possible.
+         *
+         * Save the interrupt priority value that is about to be clobbered. */
+        ulOriginalPriority = portNVIC_SHPR2_REG;
+
+        /* Determine the number of priority bits available. First write to all
+         * possible bits. */
+        portNVIC_SHPR2_REG = 0xFF000000;
+
+        /* Read the value back to see how many bits stuck. */
+        ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+        /* Use the same mask on the maximum system call priority. */
+        ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+        /* Check that the maximum system call priority is nonzero after
+         * accounting for the number of priority bits supported by the
+         * hardware. A priority of 0 is invalid because setting the BASEPRI
+         * register to 0 unmasks all interrupts, and interrupts with priority 0
+         * cannot be masked using BASEPRI.
+         * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+        configASSERT( ucMaxSysCallPriority );
+
+        /* Check that the bits not implemented in hardware are zero in
+         * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+        configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+        /* Calculate the maximum acceptable priority group value for the number
+         * of bits read back. */
+
+        while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+        {
+            ulImplementedPrioBits++;
+            ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+        }
+
+        if( ulImplementedPrioBits == 8 )
+        {
+            /* When the hardware implements 8 priority bits, there is no way for
+             * the software to configure PRIGROUP to not have sub-priorities. As
+             * a result, the least significant bit is always used for sub-priority
+             * and there are 128 preemption priorities and 2 sub-priorities.
+             *
+             * This may cause some confusion in some cases - for example, if
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+             * priority interrupts will be masked in Critical Sections as those
+             * are at the same preemption priority. This may appear confusing as
+             * 4 is higher (numerically lower) priority than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+             * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+             * to 4, this confusion does not happen and the behaviour remains the same.
+             *
+             * The following assert ensures that the sub-priority bit in the
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+             * confusion. */
+            configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+            ulMaxPRIGROUPValue = 0;
+        }
+        else
+        {
+            ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+        }
+
+        /* Shift the priority group value back to its position within the AIRCR
+         * register. */
+        ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+        ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+        /* Restore the clobbered interrupt priority register to its original
+         * value. */
+        portNVIC_SHPR2_REG = ulOriginalPriority;
+    }
+    #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+    /* Make PendSV, CallSV and SysTick the same priority as the kernel. */
+    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
+    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
+
+    #if ( configENABLE_MPU == 1 )
+    {
+        /* Setup the Memory Protection Unit (MPU). */
+        prvSetupMPU();
+    }
+    #endif /* configENABLE_MPU */
+
+    /* Start the timer that generates the tick ISR. Interrupts are disabled
+     * here already. */
+    vPortSetupTimerInterrupt();
+
+    /* Initialize the critical nesting count ready for the first task. */
+    ulCriticalNesting = 0;
+
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+    {
+        /* From this point on, kernel object access is checked against each
+         * task's access control list rather than granted unconditionally. */
+        xSchedulerRunning = pdTRUE;
+    }
+    #endif
+
+    /* Start the first task. */
+    vStartFirstTask();
+
+    /* Should never get here as the tasks will now be executing. Call the task
+     * exit error function to prevent compiler warnings about a static function
+     * not being called in the case that the application writer overrides this
+     * functionality by defining configTASK_RETURN_ADDRESS. Call
+     * vTaskSwitchContext() so link time optimization does not remove the
+     * symbol. */
+    vTaskSwitchContext();
+    prvTaskExitError();
+
+    /* Should not get here. */
+    return 0;
+}
+/*-----------------------------------------------------------*/
+
+/* Ending the scheduler is not supported on this port - there is no code to
+ * return to - so deliberately fail an assertion if this is ever called. */
+void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Not implemented in ports where there is nothing to return to.
+     * Artificially force an assert. */
+    configASSERT( ulCriticalNesting == 1000UL );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Translate the generic xRegions memory region definitions (and, when
+     * ulStackDepth > 0, the task stack described by pxBottomOfStack) into
+     * ARMv8-M MPU RBAR/RLAR register values stored in xMPUSettings. MAIR0
+     * attribute index 0 is programmed as normal memory and index 1 as device
+     * memory; regions the task does not use are invalidated (RBAR/RLAR = 0). */
+    void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
+                                    const struct xMEMORY_REGION * const xRegions,
+                                    StackType_t * pxBottomOfStack,
+                                    uint32_t ulStackDepth )
+    {
+        uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber;
+        int32_t lIndex = 0;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_sram_start__;
+            extern uint32_t * __privileged_sram_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __privileged_sram_start__[];
+            extern uint32_t __privileged_sram_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+
+        /* Setup MAIR0. */
+        xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+        xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+        /* This function is called automatically when the task is created - in
+         * which case the stack region parameters will be valid. At all other
+         * times the stack parameters will not be valid and it is assumed that
+         * the stack region has already been configured. */
+        if( ulStackDepth > 0 )
+        {
+            ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+            ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1;
+
+            /* If the stack is within the privileged SRAM, do not protect it
+             * using a separate MPU region. This is needed because privileged
+             * SRAM is already protected using an MPU region and ARMv8-M does
+             * not allow overlapping MPU regions. */
+            if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) &&
+                ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) )
+            {
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0;
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0;
+            }
+            else
+            {
+                /* Define the region that allows access to the stack. */
+                ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) |
+                                                             ( portMPU_REGION_NON_SHAREABLE ) |
+                                                             ( portMPU_REGION_READ_WRITE ) |
+                                                             ( portMPU_REGION_EXECUTE_NEVER );
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) |
+                                                             ( portMPU_RLAR_ATTR_INDEX0 ) |
+                                                             ( portMPU_RLAR_REGION_ENABLE );
+            }
+        }
+
+        /* User supplied configurable regions. */
+        for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ )
+        {
+            /* If xRegions is NULL i.e. the task has not specified any MPU
+             * region, the else part ensures that all the configurable MPU
+             * regions are invalidated. */
+            if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) )
+            {
+                /* Translate the generic region definition contained in xRegions
+                 * into the ARMv8 specific MPU settings that are then stored in
+                 * xMPUSettings. */
+                ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                /* Start address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) |
+                                                                          ( portMPU_REGION_NON_SHAREABLE );
+
+                /* RO/RW. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY );
+                }
+                else
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE );
+                }
+
+                /* XN. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER );
+                }
+
+                /* End Address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) |
+                                                                          ( portMPU_RLAR_REGION_ENABLE );
+
+                /* Normal memory/ Device memory. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
+                {
+                    /* Attr1 in MAIR0 is configured as device memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1;
+                }
+                else
+                {
+                    /* Attr0 in MAIR0 is configured as normal memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0;
+                }
+            }
+            else
+            {
+                /* Invalidate the region. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL;
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL;
+            }
+
+            lIndex++;
+        }
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Return pdTRUE if the calling task may access [pvBuffer,
+     * pvBuffer + ulBufferLength) with the access type in ulAccessRequested.
+     * Privileged tasks are always granted access; unprivileged tasks are
+     * granted access only if the whole buffer lies within one of their
+     * enabled MPU regions with sufficient permissions. Overflowing address
+     * ranges are rejected. */
+    BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+                                                uint32_t ulBufferLength,
+                                                uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+    {
+        uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+        BaseType_t xAccessGranted = pdFALSE;
+        const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+        if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+        {
+            xAccessGranted = pdTRUE;
+        }
+        else
+        {
+            /* Reject buffers whose end address would wrap around 2^32. */
+            if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+            {
+                ulBufferStartAddress = ( uint32_t ) pvBuffer;
+                ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+                for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+                {
+                    /* Is the MPU region enabled? */
+                    if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+                    {
+                        /* Both ends of the buffer must fall inside this region
+                         * and the region's permissions must cover the request. */
+                        if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+                            portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+                            portIS_AUTHORIZED( ulAccessRequested,
+                                               prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+                        {
+                            xAccessGranted = pdTRUE;
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+
+        return xAccessGranted;
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Return pdTRUE when called from an exception/interrupt handler and pdFALSE
+ * when called from thread mode, determined from the IPSR exception number. */
+BaseType_t xPortIsInsideInterrupt( void )
+{
+    uint32_t ulCurrentInterrupt;
+    BaseType_t xReturn;
+
+    /* Obtain the number of the currently executing interrupt. Interrupt Program
+     * Status Register (IPSR) holds the exception number of the currently-executing
+     * exception or zero for Thread mode.*/
+    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+    if( ulCurrentInterrupt == 0 )
+    {
+        xReturn = pdFALSE;
+    }
+    else
+    {
+        xReturn = pdTRUE;
+    }
+
+    return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+    /* Assert that the currently executing interrupt is allowed to call ISR
+     * safe ("FromISR") FreeRTOS API functions: its priority must be at or
+     * below (numerically at or above) configMAX_SYSCALL_INTERRUPT_PRIORITY,
+     * and the NVIC priority grouping must assign all bits to pre-emption
+     * priority. */
+    void vPortValidateInterruptPriority( void )
+    {
+        uint32_t ulCurrentInterrupt;
+        uint8_t ucCurrentPriority;
+
+        /* Obtain the number of the currently executing interrupt. */
+        __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+        /* Is the interrupt number a user defined interrupt? */
+        if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+        {
+            /* Look up the interrupt's priority. */
+            ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+            /* The following assertion will fail if a service routine (ISR) for
+             * an interrupt that has been assigned a priority above
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+             * function. ISR safe FreeRTOS API functions must *only* be called
+             * from interrupts that have been assigned a priority at or below
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Numerically low interrupt priority numbers represent logically high
+             * interrupt priorities, therefore the priority of the interrupt must
+             * be set to a value equal to or numerically *higher* than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Interrupts that use the FreeRTOS API must not be left at their
+             * default priority of zero as that is the highest possible priority,
+             * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+             * and therefore also guaranteed to be invalid.
+             *
+             * FreeRTOS maintains separate thread and ISR API functions to ensure
+             * interrupt entry is as fast and simple as possible.
+             *
+             * The following links provide detailed information:
+             * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+             * https://www.FreeRTOS.org/FAQHelp.html */
+            configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+        }
+
+        /* Priority grouping: The interrupt controller (NVIC) allows the bits
+         * that define each interrupt's priority to be split between bits that
+         * define the interrupt's pre-emption priority bits and bits that define
+         * the interrupt's sub-priority. For simplicity all bits must be defined
+         * to be pre-emption priority bits. The following assertion will fail if
+         * this is not the case (if some bits represent a sub-priority).
+         *
+         * If the application only uses CMSIS libraries for interrupt
+         * configuration then the correct setting can be achieved on all Cortex-M
+         * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+         * scheduler. Note however that some vendor specific peripheral libraries
+         * assume a non-zero priority group setting, in which cases using a value
+         * of zero will result in unpredictable behaviour. */
+        configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+    }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    /* Grant xInternalTaskHandle access to the kernel object identified by
+     * lInternalIndexOfKernelObject by setting the corresponding bit in the
+     * task's access control list bitmap. */
+    void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                         int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+        xMPU_SETTINGS * xTaskMpuSettings;
+
+        /* Locate the ACL word and the bit within it for this object index. */
+        ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+        ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+        xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+        xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    /* Revoke xInternalTaskHandle's access to the kernel object identified by
+     * lInternalIndexOfKernelObject by clearing the corresponding bit in the
+     * task's access control list bitmap. */
+    void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                          int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+        xMPU_SETTINGS * xTaskMpuSettings;
+
+        /* Locate the ACL word and the bit within it for this object index. */
+        ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+        ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+        xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+        xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+        /* Return pdTRUE if the calling task may access the kernel object with
+         * the given internal index. Access is granted unconditionally before
+         * the scheduler starts and for privileged tasks; otherwise the task's
+         * access control list bitmap decides. */
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+            BaseType_t xAccessGranted = pdFALSE;
+            const xMPU_SETTINGS * xTaskMpuSettings;
+
+            if( xSchedulerRunning == pdFALSE )
+            {
+                /* Grant access to all the kernel objects before the scheduler
+                 * is started. It is necessary because there is no task running
+                 * yet and therefore, we cannot use the permissions of any
+                 * task. */
+                xAccessGranted = pdTRUE;
+            }
+            else
+            {
+                xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+                /* Locate the ACL word and the bit within it for this object. */
+                ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+                ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+                if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+                {
+                    xAccessGranted = pdTRUE;
+                }
+                else
+                {
+                    if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+                    {
+                        xAccessGranted = pdTRUE;
+                    }
+                }
+            }
+
+            return xAccessGranted;
+        }
+
+    #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+        /* ACL feature disabled: every task is authorized to access every
+         * kernel object. */
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            ( void ) lInternalIndexOfKernelObject;
+
+            /* If Access Control List feature is not used, all the tasks have
+             * access to all the kernel objects. */
+            return pdTRUE;
+        }
+
+    #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c b/Source/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c
new file mode 100644
index 0000000..b3f6a0a
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c
@@ -0,0 +1,499 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION
+ * is defined correctly and privileged functions are placed in correct sections. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Portasm includes. */
+#include "portasm.h"
+
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
+
+/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
+ * header files. */
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+#if ( configENABLE_MPU == 1 )
+
+ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+ }
+
+#else /* configENABLE_MPU */
+
+ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ " \n"
+ " ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ " msr psplim, r1 \n" /* Set this task's PSPLIM value. */
+ " movs r1, #2 \n" /* r1 = 2. */
+ " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n" /* Discard everything up to r0. */
+ " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx r2 \n" /* Finally, branch to EXC_RETURN. */
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ );
+ }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+ " ite ne \n"
+ " movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ " moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ " bx lr \n" /* Return. */
+ " \n"
+ " .align 4 \n"
+ ::: "r0", "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* Read the CONTROL register. */
+ " bic r0, #1 \n" /* Clear the bit 0. */
+ " msr control, r0 \n" /* Write back the new CONTROL value. */
+ " bx lr \n" /* Return to the caller. */
+ ::: "r0", "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void vResetPrivilege( void ) /* __attribute__ (( naked )) */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " orr r0, #1 \n" /* r0 = r0 | 1. */
+ " msr control, r0 \n" /* CONTROL = r0. */
+ " bx lr \n" /* Return to the caller. */
+ ::: "r0", "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
+ " ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
+ " msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
+ " cpsie i \n" /* Globally enable interrupts. */
+ " cpsie f \n"
+ " dsb \n"
+ " isb \n"
+ " svc %0 \n" /* System call to start the first task. */
+ " nop \n"
+ " \n"
+ " .align 4 \n"
+ "xVTORConst: .word 0xe000ed08 \n"
+ ::"i" ( portSVC_START_SCHEDULER ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
+ " mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " msr basepri, r1 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bx lr \n" /* Return. */
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " msr basepri, r0 \n" /* basepri = ulMask. */
+ " dsb \n"
+ " isb \n"
+ " bx lr \n" /* Return. */
+ ::: "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " \n"
+ " save_general_regs: \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " stmia r1!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psplim \n" /* r3 = PSPLIM. */
+ " mrs r4, control \n" /* r4 = CONTROL. */
+ " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " restore_context_done: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+ }
+
+#else /* configENABLE_MPU */
+
+ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " mrs r0, psp \n" /* Read PSP in r0. */
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " it eq \n"
+ " vstmdbeq r0!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " mrs r2, psplim \n" /* r2 = PSPLIM. */
+ " mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
+ " stmdb r0!, {r2-r11} \n" /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r2] \n" /* Read pxCurrentTCB. */
+ " str r0, [r1] \n" /* Save the new top of stack in TCB. */
+ " \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
+ " \n"
+ " ldmia r0!, {r2-r11} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst r3, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " it eq \n"
+ " vldmiaeq r0!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
+ " msr psp, r0 \n" /* Remember the new top of stack for the task. */
+ " bx r3 \n"
+ " \n"
+ " .align 4 \n"
+ "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+ }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "blt syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ : "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+ }
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " tst lr, #4 \n"
+ " ite eq \n"
+ " mrseq r0, msp \n"
+ " mrsne r0, psp \n"
+ " ldr r1, svchandler_address_const \n"
+ " bx r1 \n"
+ " \n"
+ " .align 4 \n"
+ "svchandler_address_const: .word vPortSVCHandler_C \n"
+ );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.h b/Source/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.h
new file mode 100644
index 0000000..f64ceb5
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.h
@@ -0,0 +1,114 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __PORT_ASM_H__
+#define __PORT_ASM_H__
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+
+/* MPU wrappers includes. */
+#include "mpu_wrappers.h"
+
+/**
+ * @brief Restore the context of the first task so that the first task starts
+ * executing.
+ */
+void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Raises the privilege level by clearing the bit 0 of the CONTROL
+ * register.
+ *
+ * @note This is a privileged function and should only be called from the kernel
+ * code.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vResetPrivilege( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Starts the first task.
+ */
+void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Disables interrupts.
+ */
+uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enables interrupts.
+ */
+void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief PendSV Exception handler.
+ */
+void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SVC Handler.
+ */
+void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Allocate a Secure context for the calling task.
+ *
+ * @param[in] ulSecureStackSize The size of the stack to be allocated on the
+ * secure side for the calling task.
+ */
+void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) );
+
+/**
+ * @brief Free the task's secure context.
+ *
+ * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task.
+ */
+void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* __PORT_ASM_H__ */
diff --git a/Source/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h b/Source/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h
new file mode 100644
index 0000000..f606f81
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h
@@ -0,0 +1,78 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_MVE
+ #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE.
+#endif /* configENABLE_MVE */
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portARCH_NAME "Cortex-M85"
+#define portHAS_BASEPRI 1
+#define portDONT_DISCARD __attribute__( ( used ) )
+/*-----------------------------------------------------------*/
+
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portDISABLE_INTERRUPTS() ulSetInterruptMask()
+#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 )
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACRO_H */
diff --git a/Source/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/Source/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h
new file mode 100644
index 0000000..6f666da
--- /dev/null
+++ b/Source/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h
@@ -0,0 +1,449 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
+
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
+
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
+
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Type definitions.
+ */
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
+
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
+
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+
+/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
+ * not need to be guarded with a critical section. */
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Extern declarations.
+ */
+extern BaseType_t xPortIsInsideInterrupt( void );
+
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
+
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief MPU specific constants.
+ */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
+
+/* MPU settings that can be overridden in FreeRTOSConfig.h. */
+#ifndef configTOTAL_MPU_REGIONS
+ /* Define to 8 for backward compatibility. */
+ #define configTOTAL_MPU_REGIONS ( 8UL )
+#endif
+
+/* MPU regions. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+
+/* Device memory attributes used in MPU_MAIR registers.
+ *
+ * 8-bit values encoded as follows:
+ * Bit[7:4] - 0000 - Device Memory
+ * Bit[3:2] - 00 --> Device-nGnRnE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
+ * Bit[1:0] - 00, Reserved.
+ */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+
+/* Normal memory attributes used in MPU_MAIR registers. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+
+/* Attributes used in MPU_RBAR registers. */
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
+ * @brief SVC numbers.
+ */
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Scheduler utilities.
+ */
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Tickless idle/low power functionality.
+ */
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Task function macros as described on the FreeRTOS.org WEB site.
+ */
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Allocate a secure context for the task.
+ *
+ * Tasks are not created with a secure context. Any task that is going to call
+ * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
+ * secure context before it calls any secure function.
+ *
+ * @param[in] ulSecureStackSize The size of the secure stack to be allocated.
+ */
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+
+/**
+ * @brief Called when a task is deleted to delete the task's secure context,
+ * if it has one.
+ *
+ * @param[in] pxTCB The TCB of the task being deleted.
+ */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+ #define portIS_PRIVILEGED() xIsPrivileged()
+
+/**
+ * @brief Raise an SVC request to raise privilege.
+ *
+ * The SVC handler checks that the SVC was raised from a system call and only
+ * then it raises the privilege. If this is called from any other place,
+ * the privilege is not raised.
+ */
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Barriers.
+ */
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/IAR/ARM_CA9/port.c b/Source/portable/IAR/ARM_CA9/port.c
new file mode 100644
index 0000000..b9126a9
--- /dev/null
+++ b/Source/portable/IAR/ARM_CA9/port.c
@@ -0,0 +1,438 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdlib.h>
+
+/* IAR includes. */
+#include <intrinsics.h>
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+#ifndef configINTERRUPT_CONTROLLER_BASE_ADDRESS
+ #error configINTERRUPT_CONTROLLER_BASE_ADDRESS must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif
+
+#ifndef configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET
+ #error configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif
+
+#ifndef configUNIQUE_INTERRUPT_PRIORITIES
+ #error configUNIQUE_INTERRUPT_PRIORITIES must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif
+
+#ifndef configSETUP_TICK_INTERRUPT
+ #error configSETUP_TICK_INTERRUPT() must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif /* configSETUP_TICK_INTERRUPT */
+
+#ifndef configMAX_API_CALL_INTERRUPT_PRIORITY
+ #error configMAX_API_CALL_INTERRUPT_PRIORITY must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif
+
+#if configMAX_API_CALL_INTERRUPT_PRIORITY == 0
+ #error configMAX_API_CALL_INTERRUPT_PRIORITY must not be set to 0
+#endif
+
+#if configMAX_API_CALL_INTERRUPT_PRIORITY > configUNIQUE_INTERRUPT_PRIORITIES
+ #error configMAX_API_CALL_INTERRUPT_PRIORITY must be less than or equal to configUNIQUE_INTERRUPT_PRIORITIES as the lower the numeric priority value the higher the logical interrupt priority
+#endif
+
+#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
+ /* Check the configuration. */
+ #if( configMAX_PRIORITIES > 32 )
+        #error configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32.  It is very rare that a system requires more than 10 to 15 different priorities as tasks that share a priority will time slice.
+ #endif
+#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
+
+/* In case security extensions are implemented. */
+#if configMAX_API_CALL_INTERRUPT_PRIORITY <= ( configUNIQUE_INTERRUPT_PRIORITIES / 2 )
+ #error configMAX_API_CALL_INTERRUPT_PRIORITY must be greater than ( configUNIQUE_INTERRUPT_PRIORITIES / 2 )
+#endif
+
+#ifndef configCLEAR_TICK_INTERRUPT
+ #define configCLEAR_TICK_INTERRUPT()
+#endif
+
+/* A critical section is exited when the critical section nesting count reaches
+this value. */
+#define portNO_CRITICAL_NESTING ( ( uint32_t ) 0 )
+
+/* In all GICs 255 can be written to the priority mask register to unmask all
+(but the lowest) interrupt priority. */
+#define portUNMASK_VALUE ( 0xFFUL )
+
+/* Tasks are not created with a floating point context, but can be given a
+floating point context after they have been created. A variable is stored as
+part of the tasks context that holds portNO_FLOATING_POINT_CONTEXT if the task
+does not have an FPU context, or any other value if the task does have an FPU
+context. */
+#define portNO_FLOATING_POINT_CONTEXT ( ( StackType_t ) 0 )
+
+/* Constants required to setup the initial task context. */
+#define portINITIAL_SPSR ( ( StackType_t ) 0x1f ) /* System mode, ARM mode, interrupts enabled. */
+#define portTHUMB_MODE_BIT ( ( StackType_t ) 0x20 )
+#define portTHUMB_MODE_ADDRESS ( 0x01UL )
+
+/* Used by portASSERT_IF_INTERRUPT_PRIORITY_INVALID() when ensuring the binary
+point is zero. */
+#define portBINARY_POINT_BITS ( ( uint8_t ) 0x03 )
+
+/* Masks all bits in the APSR other than the mode bits. */
+#define portAPSR_MODE_BITS_MASK ( 0x1F )
+
+/* The value of the mode bits in the APSR when the CPU is executing in user
+mode. */
+#define portAPSR_USER_MODE ( 0x10 )
+
+/* Macro to unmask all interrupt priorities. */
+#define portCLEAR_INTERRUPT_MASK() \
+{ \
+ __disable_irq(); \
+ portICCPMR_PRIORITY_MASK_REGISTER = portUNMASK_VALUE; \
+ __asm( "DSB \n" \
+ "ISB \n" ); \
+ __enable_irq(); \
+}
+
+/*-----------------------------------------------------------*/
+
+/*
+ * Starts the first task executing. This function is necessarily written in
+ * assembly code so is implemented in portASM.s.
+ */
+extern void vPortRestoreTaskContext( void );
+
+/*
+ * Used to catch tasks that attempt to return from their implementing function.
+ */
+static void prvTaskExitError( void );
+
+/*-----------------------------------------------------------*/
+
+/* A variable is used to keep track of the critical section nesting. This
+variable has to be stored as part of the task context and must be initialised to
+a non zero value to ensure interrupts don't inadvertently become unmasked before
+the scheduler starts. As it is stored as part of the task context it will
+automatically be set to 0 when the first task is started. */
+volatile uint32_t ulCriticalNesting = 9999UL;
+
+/* Saved as part of the task context. If ulPortTaskHasFPUContext is non-zero
+then a floating point context must be saved and restored for the task. */
+uint32_t ulPortTaskHasFPUContext = pdFALSE;
+
+/* Set to 1 to pend a context switch from an ISR. */
+uint32_t ulPortYieldRequired = pdFALSE;
+
+/* Counts the interrupt nesting depth.  A context switch is only performed
+if the nesting depth is 0. */
+uint32_t ulPortInterruptNesting = 0UL;
+
+
+/*-----------------------------------------------------------*/
+
+/*
+ * See header file for description.
+ */
+StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
+{
+ /* Setup the initial stack of the task. The stack is set exactly as
+ expected by the portRESTORE_CONTEXT() macro.
+
+	The first real value on the stack is the status register, which is set for
+ system mode, with interrupts enabled. A few NULLs are added first to ensure
+ GDB does not try decoding a non-existent return address. */
+ *pxTopOfStack = NULL;
+ pxTopOfStack--;
+ *pxTopOfStack = NULL;
+ pxTopOfStack--;
+ *pxTopOfStack = NULL;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portINITIAL_SPSR;
+
+ if( ( ( uint32_t ) pxCode & portTHUMB_MODE_ADDRESS ) != 0x00UL )
+ {
+ /* The task will start in THUMB mode. */
+ *pxTopOfStack |= portTHUMB_MODE_BIT;
+ }
+
+ pxTopOfStack--;
+
+ /* Next the return address, which in this case is the start of the task. */
+ *pxTopOfStack = ( StackType_t ) pxCode;
+ pxTopOfStack--;
+
+ /* Next all the registers other than the stack pointer. */
+ *pxTopOfStack = ( StackType_t ) prvTaskExitError; /* R14 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x12121212; /* R12 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x11111111; /* R11 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x10101010; /* R10 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x09090909; /* R9 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x08080808; /* R8 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x07070707; /* R7 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x06060606; /* R6 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x05050505; /* R5 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x04040404; /* R4 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x03030303; /* R3 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x02020202; /* R2 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x01010101; /* R1 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ pxTopOfStack--;
+
+ /* The task will start with a critical nesting count of 0 as interrupts are
+ enabled. */
+ *pxTopOfStack = portNO_CRITICAL_NESTING;
+ pxTopOfStack--;
+
+ /* The task will start without a floating point context. A task that uses
+ the floating point hardware must call vPortTaskUsesFPU() before executing
+ any floating point instructions. */
+ *pxTopOfStack = portNO_FLOATING_POINT_CONTEXT;
+
+ return pxTopOfStack;
+}
+/*-----------------------------------------------------------*/
+
+static void prvTaskExitError( void )
+{
+ /* A function that implements a task must not exit or attempt to return to
+ its caller as there is nothing to return to. If a task wants to exit it
+ should instead call vTaskDelete( NULL ).
+
+ Artificially force an assert() to be triggered if configASSERT() is
+ defined, then stop here so application writers can catch the error. */
+ configASSERT( ulPortInterruptNesting == ~0UL );
+ portDISABLE_INTERRUPTS();
+ for( ;; );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortStartScheduler( void )
+{
+uint32_t ulAPSR;
+
+ /* Only continue if the CPU is not in User mode. The CPU must be in a
+ Privileged mode for the scheduler to start. */
+ __asm volatile ( "MRS %0, APSR" : "=r" ( ulAPSR ) );
+ ulAPSR &= portAPSR_MODE_BITS_MASK;
+ configASSERT( ulAPSR != portAPSR_USER_MODE );
+
+ if( ulAPSR != portAPSR_USER_MODE )
+ {
+ /* Only continue if the binary point value is set to its lowest possible
+ setting. See the comments in vPortValidateInterruptPriority() below for
+ more information. */
+ configASSERT( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE );
+
+ if( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE )
+ {
+ /* Start the timer that generates the tick ISR. */
+ configSETUP_TICK_INTERRUPT();
+
+ __enable_irq();
+ vPortRestoreTaskContext();
+ }
+ }
+
+ /* Will only get here if vTaskStartScheduler() was called with the CPU in
+ a non-privileged mode or the binary point register was not set to its lowest
+ possible value. */
+ return 0;
+}
+/*-----------------------------------------------------------*/
+
+void vPortEndScheduler( void )
+{
+ /* Not implemented in ports where there is nothing to return to.
+ Artificially force an assert. */
+ configASSERT( ulCriticalNesting == 1000UL );
+}
+/*-----------------------------------------------------------*/
+
+void vPortEnterCritical( void )
+{
+ /* Disable interrupts as per portDISABLE_INTERRUPTS(); */
+ ulPortSetInterruptMask();
+
+ /* Now interrupts are disabled ulCriticalNesting can be accessed
+ directly. Increment ulCriticalNesting to keep a count of how many times
+ portENTER_CRITICAL() has been called. */
+ ulCriticalNesting++;
+
+ /* This is not the interrupt safe version of the enter critical function so
+ assert() if it is being called from an interrupt context. Only API
+ functions that end in "FromISR" can be used in an interrupt. Only assert if
+ the critical nesting count is 1 to protect against recursive calls if the
+ assert function also uses a critical section. */
+ if( ulCriticalNesting == 1 )
+ {
+ configASSERT( ulPortInterruptNesting == 0 );
+ }
+}
+/*-----------------------------------------------------------*/
+
+void vPortExitCritical( void )
+{
+ if( ulCriticalNesting > portNO_CRITICAL_NESTING )
+ {
+ /* Decrement the nesting count as the critical section is being
+ exited. */
+ ulCriticalNesting--;
+
+ /* If the nesting level has reached zero then all interrupt
+ priorities must be re-enabled. */
+ if( ulCriticalNesting == portNO_CRITICAL_NESTING )
+ {
+ /* Critical nesting has reached zero so all interrupt priorities
+ should be unmasked. */
+ portCLEAR_INTERRUPT_MASK();
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+void FreeRTOS_Tick_Handler( void )
+{
+ /* Set interrupt mask before altering scheduler structures. The tick
+ handler runs at the lowest priority, so interrupts cannot already be masked,
+ so there is no need to save and restore the current mask value. */
+ __disable_irq();
+ portICCPMR_PRIORITY_MASK_REGISTER = ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
+ __asm( "DSB \n"
+ "ISB \n" );
+ __enable_irq();
+
+ /* Increment the RTOS tick. */
+ if( xTaskIncrementTick() != pdFALSE )
+ {
+ ulPortYieldRequired = pdTRUE;
+ }
+
+ /* Ensure all interrupt priorities are active again. */
+ portCLEAR_INTERRUPT_MASK();
+ configCLEAR_TICK_INTERRUPT();
+}
+/*-----------------------------------------------------------*/
+
+void vPortTaskUsesFPU( void )
+{
+uint32_t ulInitialFPSCR = 0;
+
+ /* A task is registering the fact that it needs an FPU context. Set the
+ FPU flag (which is saved as part of the task context). */
+ ulPortTaskHasFPUContext = pdTRUE;
+
+ /* Initialise the floating point status register. */
+ __asm( "FMXR FPSCR, %0" :: "r" (ulInitialFPSCR) );
+}
+/*-----------------------------------------------------------*/
+
+void vPortClearInterruptMask( uint32_t ulNewMaskValue )
+{
+ if( ulNewMaskValue == pdFALSE )
+ {
+ portCLEAR_INTERRUPT_MASK();
+ }
+}
+/*-----------------------------------------------------------*/
+
+uint32_t ulPortSetInterruptMask( void )
+{
+uint32_t ulReturn;
+
+ __disable_irq();
+ if( portICCPMR_PRIORITY_MASK_REGISTER == ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) )
+ {
+ /* Interrupts were already masked. */
+ ulReturn = pdTRUE;
+ }
+ else
+ {
+ ulReturn = pdFALSE;
+ portICCPMR_PRIORITY_MASK_REGISTER = ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
+ __asm( "DSB \n"
+ "ISB \n" );
+ }
+ __enable_irq();
+
+ return ulReturn;
+}
+/*-----------------------------------------------------------*/
+
+#if( configASSERT_DEFINED == 1 )
+
+ void vPortValidateInterruptPriority( void )
+ {
+ /* The following assertion will fail if a service routine (ISR) for
+ an interrupt that has been assigned a priority above
+ configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+ function. ISR safe FreeRTOS API functions must *only* be called
+ from interrupts that have been assigned a priority at or below
+ configMAX_SYSCALL_INTERRUPT_PRIORITY.
+
+ Numerically low interrupt priority numbers represent logically high
+ interrupt priorities, therefore the priority of the interrupt must
+ be set to a value equal to or numerically *higher* than
+ configMAX_SYSCALL_INTERRUPT_PRIORITY.
+
+ FreeRTOS maintains separate thread and ISR API functions to ensure
+ interrupt entry is as fast and simple as possible.
+
+ The following links provide detailed information:
+ https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+ https://www.FreeRTOS.org/FAQHelp.html */
+ configASSERT( portICCRPR_RUNNING_PRIORITY_REGISTER >= ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) );
+
+ /* Priority grouping: The interrupt controller (GIC) allows the bits
+ that define each interrupt's priority to be split between bits that
+ define the interrupt's pre-emption priority bits and bits that define
+ the interrupt's sub-priority. For simplicity all bits must be defined
+ to be pre-emption priority bits. The following assertion will fail if
+ this is not the case (if some bits represent a sub-priority).
+
+ The priority grouping is configured by the GIC's binary point register
+		(ICCBPR).  Writing 0 to ICCBPR will ensure it is set to its lowest
+ possible value (which may be above 0). */
+ configASSERT( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE );
+ }
+
+#endif /* configASSERT_DEFINED */
diff --git a/Source/portable/IAR/ARM_CA9/portASM.h b/Source/portable/IAR/ARM_CA9/portASM.h
new file mode 100644
index 0000000..31229b7
--- /dev/null
+++ b/Source/portable/IAR/ARM_CA9/portASM.h
@@ -0,0 +1,111 @@
+;/*
+; * FreeRTOS Kernel V10.6.2
+; * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+; *
+; * SPDX-License-Identifier: MIT
+; *
+; * Permission is hereby granted, free of charge, to any person obtaining a copy of
+; * this software and associated documentation files (the "Software"), to deal in
+; * the Software without restriction, including without limitation the rights to
+; * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+; * the Software, and to permit persons to whom the Software is furnished to do so,
+; * subject to the following conditions:
+; *
+; * The above copyright notice and this permission notice shall be included in all
+; * copies or substantial portions of the Software.
+; *
+; * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+; * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+; * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+; * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+; * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+; * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+; *
+; * https://www.FreeRTOS.org
+; * https://github.com/FreeRTOS
+; *
+; */
+
+ EXTERN vTaskSwitchContext
+ EXTERN ulCriticalNesting
+ EXTERN pxCurrentTCB
+ EXTERN ulPortTaskHasFPUContext
+ EXTERN ulAsmAPIPriorityMask
+
+portSAVE_CONTEXT macro
+
+ ; Save the LR and SPSR onto the system mode stack before switching to
+ ; system mode to save the remaining system mode registers
+ SRSDB sp!, #SYS_MODE
+ CPS #SYS_MODE
+ PUSH {R0-R12, R14}
+
+ ; Push the critical nesting count
+ LDR R2, =ulCriticalNesting
+ LDR R1, [R2]
+ PUSH {R1}
+
+ ; Does the task have a floating point context that needs saving? If
+ ; ulPortTaskHasFPUContext is 0 then no.
+ LDR R2, =ulPortTaskHasFPUContext
+ LDR R3, [R2]
+ CMP R3, #0
+
+ ; Save the floating point context, if any
+ FMRXNE R1, FPSCR
+ VPUSHNE {D0-D15}
+ VPUSHNE {D16-D31}
+ PUSHNE {R1}
+
+ ; Save ulPortTaskHasFPUContext itself
+ PUSH {R3}
+
+ ; Save the stack pointer in the TCB
+ LDR R0, =pxCurrentTCB
+ LDR R1, [R0]
+ STR SP, [R1]
+
+ endm
+
+; /**********************************************************************/
+
+portRESTORE_CONTEXT macro
+
+ ; Set the SP to point to the stack of the task being restored.
+ LDR R0, =pxCurrentTCB
+ LDR R1, [R0]
+ LDR SP, [R1]
+
+ ; Is there a floating point context to restore? If the restored
+ ; ulPortTaskHasFPUContext is zero then no.
+ LDR R0, =ulPortTaskHasFPUContext
+ POP {R1}
+ STR R1, [R0]
+ CMP R1, #0
+
+ ; Restore the floating point context, if any
+ POPNE {R0}
+ VPOPNE {D16-D31}
+ VPOPNE {D0-D15}
+ VMSRNE FPSCR, R0
+
+ ; Restore the critical section nesting depth
+ LDR R0, =ulCriticalNesting
+ POP {R1}
+ STR R1, [R0]
+
+ ; Ensure the priority mask is correct for the critical nesting depth
+ LDR R2, =portICCPMR_PRIORITY_MASK_REGISTER_ADDRESS
+ CMP R1, #0
+ MOVEQ R4, #255
+ LDRNE R4, =( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT )
+ STR R4, [r2]
+
+ ; Restore all system mode registers other than the SP (which is already
+ ; being used)
+ POP {R0-R12, R14}
+
+ ; Return to the task code, loading CPSR on the way.
+ RFEIA sp!
+
+ endm
diff --git a/Source/portable/IAR/ARM_CA9/portASM.s b/Source/portable/IAR/ARM_CA9/portASM.s
new file mode 100644
index 0000000..e9c87c6
--- /dev/null
+++ b/Source/portable/IAR/ARM_CA9/portASM.s
@@ -0,0 +1,174 @@
+;/*
+; * FreeRTOS Kernel V10.6.2
+; * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+; *
+; * SPDX-License-Identifier: MIT
+; *
+; * Permission is hereby granted, free of charge, to any person obtaining a copy of
+; * this software and associated documentation files (the "Software"), to deal in
+; * the Software without restriction, including without limitation the rights to
+; * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+; * the Software, and to permit persons to whom the Software is furnished to do so,
+; * subject to the following conditions:
+; *
+; * The above copyright notice and this permission notice shall be included in all
+; * copies or substantial portions of the Software.
+; *
+; * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+; * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+; * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+; * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+; * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+; * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+; *
+; * https://www.FreeRTOS.org
+; * https://github.com/FreeRTOS
+; *
+; */
+
+ INCLUDE FreeRTOSConfig.h
+ INCLUDE portmacro.h
+
+ EXTERN vApplicationIRQHandler
+ EXTERN vTaskSwitchContext
+ EXTERN ulPortYieldRequired
+ EXTERN ulPortInterruptNesting
+
+ PUBLIC FreeRTOS_SWI_Handler
+ PUBLIC FreeRTOS_IRQ_Handler
+ PUBLIC vPortRestoreTaskContext
+
+SYS_MODE EQU 0x1f
+SVC_MODE EQU 0x13
+IRQ_MODE EQU 0x12
+
+
+ SECTION .text:CODE:ROOT(2)
+ ARM
+
+ INCLUDE portASM.h
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; SVC handler is used to yield a task.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+FreeRTOS_SWI_Handler
+
+ PRESERVE8
+
+ ; Save the context of the current task and select a new task to run.
+ portSAVE_CONTEXT
+ LDR R0, =vTaskSwitchContext
+ BLX R0
+ portRESTORE_CONTEXT
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; vPortRestoreTaskContext is used to start the scheduler.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+vPortRestoreTaskContext
+ ; Switch to system mode
+ CPS #SYS_MODE
+ portRESTORE_CONTEXT
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; PL390 GIC interrupt handler
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+FreeRTOS_IRQ_Handler
+
+ ; Return to the interrupted instruction.
+ SUB lr, lr, #4
+
+ ; Push the return address and SPSR
+ PUSH {lr}
+ MRS lr, SPSR
+ PUSH {lr}
+
+ ; Change to supervisor mode to allow reentry.
+ CPS #SVC_MODE
+
+ ; Push used registers.
+ PUSH {r0-r4, r12}
+
+ ; Increment nesting count. r3 holds the address of ulPortInterruptNesting
+ ; for future use. r1 holds the original ulPortInterruptNesting value for
+ ; future use.
+ LDR r3, =ulPortInterruptNesting
+ LDR r1, [r3]
+ ADD r4, r1, #1
+ STR r4, [r3]
+
+ ; Read value from the interrupt acknowledge register, which is stored in r0
+ ; for future parameter and interrupt clearing use.
+ LDR r2, =portICCIAR_INTERRUPT_ACKNOWLEDGE_REGISTER_ADDRESS
+ LDR r0, [r2]
+
+ ; Ensure bit 2 of the stack pointer is clear. r2 holds the bit 2 value for
+ ; future use. _RB_ Is this ever necessary if start of stack is 8-byte aligned?
+ MOV r2, sp
+ AND r2, r2, #4
+ SUB sp, sp, r2
+
+ ; Call the interrupt handler. r4 is pushed to maintain alignment.
+ PUSH {r0-r4, lr}
+ LDR r1, =vApplicationIRQHandler
+ BLX r1
+ POP {r0-r4, lr}
+ ADD sp, sp, r2
+
+ CPSID i
+
+ ; Write the value read from ICCIAR to ICCEOIR
+ LDR r4, =portICCEOIR_END_OF_INTERRUPT_REGISTER_ADDRESS
+ STR r0, [r4]
+
+ ; Restore the old nesting count
+ STR r1, [r3]
+
+ ; A context switch is never performed if the nesting count is not 0
+ CMP r1, #0
+ BNE exit_without_switch
+
+ ; Did the interrupt request a context switch? r1 holds the address of
+ ; ulPortYieldRequired and r0 the value of ulPortYieldRequired for future
+ ; use.
+ LDR r1, =ulPortYieldRequired
+ LDR r0, [r1]
+ CMP r0, #0
+ BNE switch_before_exit
+
+exit_without_switch
+ ; No context switch. Restore used registers, LR_irq and SPSR before
+ ; returning.
+ POP {r0-r4, r12}
+ CPS #IRQ_MODE
+ POP {LR}
+ MSR SPSR_cxsf, LR
+ POP {LR}
+ MOVS PC, LR
+
+switch_before_exit
+ ; A context switch is to be performed. Clear the context switch pending
+ ; flag.
+ MOV r0, #0
+ STR r0, [r1]
+
+ ; Restore used registers, LR_irq and SPSR before saving the context
+ ; to the task stack.
+ POP {r0-r4, r12}
+ CPS #IRQ_MODE
+ POP {LR}
+ MSR SPSR_cxsf, LR
+ POP {LR}
+ portSAVE_CONTEXT
+
+ ; Call the function that selects the new task to execute. The stack must
+ ; be 8 byte aligned in case vTaskSwitchContext() uses LDRD or STRD
+ ; instructions, or 8 byte aligned stack allocated data. LR does not need
+ ; saving as a new LR will be loaded by portRESTORE_CONTEXT anyway.
+ LDR r0, =vTaskSwitchContext
+ BLX r0
+
+ ; Restore the context of, and branch to, the task selected to execute next.
+ portRESTORE_CONTEXT
+
+
+ END
diff --git a/Source/portable/IAR/ARM_CA9/portmacro.h b/Source/portable/IAR/ARM_CA9/portmacro.h
new file mode 100644
index 0000000..244fd41
--- /dev/null
+++ b/Source/portable/IAR/ARM_CA9/portmacro.h
@@ -0,0 +1,212 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/* IAR includes. */
+#ifdef __ICCARM__
+
+ #include <intrinsics.h>
+
+ /*-----------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *-----------------------------------------------------------
+ */
+
+ /* Type definitions. */
+ #define portCHAR char
+ #define portFLOAT float
+ #define portDOUBLE double
+ #define portLONG long
+ #define portSHORT short
+ #define portSTACK_TYPE uint32_t
+ #define portBASE_TYPE long
+
+ typedef portSTACK_TYPE StackType_t;
+ typedef long BaseType_t;
+ typedef unsigned long UBaseType_t;
+
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+
+ /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
+ not need to be guarded with a critical section. */
+ #define portTICK_TYPE_IS_ATOMIC 1
+
+ /*-----------------------------------------------------------*/
+
+ /* Hardware specifics. */
+ #define portSTACK_GROWTH ( -1 )
+ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+ #define portBYTE_ALIGNMENT 8
+
+ /*-----------------------------------------------------------*/
+
+ /* Task utilities. */
+
+ /* Called at the end of an ISR that can cause a context switch. */
+ #define portEND_SWITCHING_ISR( xSwitchRequired )\
+ { \
+ extern uint32_t ulPortYieldRequired; \
+ \
+ if( xSwitchRequired != pdFALSE ) \
+ { \
+ ulPortYieldRequired = pdTRUE; \
+ } \
+ }
+
+ #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+ #define portYIELD() __asm( "SWI 0" );
+
+
+ /*-----------------------------------------------------------
+ * Critical section control
+ *----------------------------------------------------------*/
+
+ extern void vPortEnterCritical( void );
+ extern void vPortExitCritical( void );
+ extern uint32_t ulPortSetInterruptMask( void );
+ extern void vPortClearInterruptMask( uint32_t ulNewMaskValue );
+
+ /* These macros do not globally disable/enable interrupts. They do mask off
+ interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. */
+ #define portENTER_CRITICAL() vPortEnterCritical();
+ #define portEXIT_CRITICAL() vPortExitCritical();
+ #define portDISABLE_INTERRUPTS() ulPortSetInterruptMask()
+ #define portENABLE_INTERRUPTS() vPortClearInterruptMask( 0 )
+ #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortSetInterruptMask()
+ #define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vPortClearInterruptMask(x)
+
+ /*-----------------------------------------------------------*/
+
+ /* Task function macros as described on the FreeRTOS.org WEB site. These are
+ not required for this port but included in case common demo code that uses these
+ macros is used. */
+ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
+ #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
+
+ /* Prototype of the FreeRTOS tick handler. This must be installed as the
+ handler for whichever peripheral is used to generate the RTOS tick. */
+ void FreeRTOS_Tick_Handler( void );
+
+ /* Any task that uses the floating point unit MUST call vPortTaskUsesFPU()
+ before any floating point instructions are executed. */
+ void vPortTaskUsesFPU( void );
+ #define portTASK_USES_FLOATING_POINT() vPortTaskUsesFPU()
+
+ #define portLOWEST_INTERRUPT_PRIORITY ( ( ( uint32_t ) configUNIQUE_INTERRUPT_PRIORITIES ) - 1UL )
+ #define portLOWEST_USABLE_INTERRUPT_PRIORITY ( portLOWEST_INTERRUPT_PRIORITY - 1UL )
+
+ /* Architecture specific optimisations. */
+ #ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
+ #define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
+ #endif
+
+ #if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
+
+ /* Store/clear the ready priorities in a bit map. */
+ #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
+ #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
+
+ /*-----------------------------------------------------------*/
+
+ #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31 - __CLZ( uxReadyPriorities ) )
+
+ #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
+
+ #ifdef configASSERT
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif /* configASSERT */
+
+ #define portNOP() __asm volatile( "NOP" )
+
+ /* Suppress warnings that are generated by the IAR tools, but cannot be
+ fixed in the source code because to do so would cause other compilers to
+ generate warnings. */
+ #pragma diag_suppress=Pe191
+ #pragma diag_suppress=Pa082
+
+#endif /* __ICCARM__ */
+
+
+/* The number of bits to shift for an interrupt priority is dependent on the
+number of bits implemented by the interrupt controller. */
+#if configUNIQUE_INTERRUPT_PRIORITIES == 16
+ #define portPRIORITY_SHIFT 4
+ #define portMAX_BINARY_POINT_VALUE 3
+#elif configUNIQUE_INTERRUPT_PRIORITIES == 32
+ #define portPRIORITY_SHIFT 3
+ #define portMAX_BINARY_POINT_VALUE 2
+#elif configUNIQUE_INTERRUPT_PRIORITIES == 64
+ #define portPRIORITY_SHIFT 2
+ #define portMAX_BINARY_POINT_VALUE 1
+#elif configUNIQUE_INTERRUPT_PRIORITIES == 128
+ #define portPRIORITY_SHIFT 1
+ #define portMAX_BINARY_POINT_VALUE 0
+#elif configUNIQUE_INTERRUPT_PRIORITIES == 256
+ #define portPRIORITY_SHIFT 0
+ #define portMAX_BINARY_POINT_VALUE 0
+#else
+ #error Invalid configUNIQUE_INTERRUPT_PRIORITIES setting. configUNIQUE_INTERRUPT_PRIORITIES must be set to the number of unique priorities implemented by the target hardware
+#endif
+
+/* Interrupt controller access addresses. */
+#define portICCPMR_PRIORITY_MASK_OFFSET ( 0x04 )
+#define portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET ( 0x0C )
+#define portICCEOIR_END_OF_INTERRUPT_OFFSET ( 0x10 )
+#define portICCBPR_BINARY_POINT_OFFSET ( 0x08 )
+#define portICCRPR_RUNNING_PRIORITY_OFFSET ( 0x14 )
+
+#define portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET )
+#define portICCPMR_PRIORITY_MASK_REGISTER ( *( ( volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET ) ) )
+#define portICCIAR_INTERRUPT_ACKNOWLEDGE_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET )
+#define portICCEOIR_END_OF_INTERRUPT_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCEOIR_END_OF_INTERRUPT_OFFSET )
+#define portICCPMR_PRIORITY_MASK_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET )
+#define portICCBPR_BINARY_POINT_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCBPR_BINARY_POINT_OFFSET ) ) )
+#define portICCRPR_RUNNING_PRIORITY_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCRPR_RUNNING_PRIORITY_OFFSET ) ) )
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM0/port.c b/Source/portable/IAR/ARM_CM0/port.c
index 6fed504..b9999a0 100644
--- a/Source/portable/IAR/ARM_CM0/port.c
+++ b/Source/portable/IAR/ARM_CM0/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -56,13 +56,6 @@
/* Constants required to set up the initial stack. */
#define portINITIAL_XPSR ( 0x01000000 )
-/* For backward compatibility, ensure configKERNEL_INTERRUPT_PRIORITY is
- * defined. The value 255 should also ensure backward compatibility.
- * FreeRTOS.org versions prior to V4.3.0 did not include this definition. */
-#ifndef configKERNEL_INTERRUPT_PRIORITY
- #define configKERNEL_INTERRUPT_PRIORITY 0
-#endif
-
/* Each task maintains its own interrupt status in the critical nesting
* variable. */
static UBaseType_t uxCriticalNesting = 0xaaaaaaaa;
diff --git a/Source/portable/IAR/ARM_CM0/portasm.s b/Source/portable/IAR/ARM_CM0/portasm.s
index 4e6ea5b..8b5caf2 100644
--- a/Source/portable/IAR/ARM_CM0/portasm.s
+++ b/Source/portable/IAR/ARM_CM0/portasm.s
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -28,105 +28,105 @@
#include <FreeRTOSConfig.h>
- RSEG CODE:CODE(2)
- thumb
+ RSEG CODE:CODE(2)
+ thumb
- EXTERN vPortYieldFromISR
- EXTERN pxCurrentTCB
- EXTERN vTaskSwitchContext
+ EXTERN vPortYieldFromISR
+ EXTERN pxCurrentTCB
+ EXTERN vTaskSwitchContext
- PUBLIC vSetMSP
- PUBLIC xPortPendSVHandler
- PUBLIC vPortSVCHandler
- PUBLIC vPortStartFirstTask
- PUBLIC ulSetInterruptMaskFromISR
- PUBLIC vClearInterruptMaskFromISR
+ PUBLIC vSetMSP
+ PUBLIC xPortPendSVHandler
+ PUBLIC vPortSVCHandler
+ PUBLIC vPortStartFirstTask
+ PUBLIC ulSetInterruptMaskFromISR
+ PUBLIC vClearInterruptMaskFromISR
/*-----------------------------------------------------------*/
vSetMSP
- msr msp, r0
- bx lr
+ msr msp, r0
+ bx lr
/*-----------------------------------------------------------*/
xPortPendSVHandler:
- mrs r0, psp
+ mrs r0, psp
- ldr r3, =pxCurrentTCB /* Get the location of the current TCB. */
- ldr r2, [r3]
+ ldr r3, =pxCurrentTCB /* Get the location of the current TCB. */
+ ldr r2, [r3]
- subs r0, r0, #32 /* Make space for the remaining low registers. */
- str r0, [r2] /* Save the new top of stack. */
- stmia r0!, {r4-r7} /* Store the low registers that are not saved automatically. */
- mov r4, r8 /* Store the high registers. */
- mov r5, r9
- mov r6, r10
- mov r7, r11
- stmia r0!, {r4-r7}
+ subs r0, r0, #32 /* Make space for the remaining low registers. */
+ str r0, [r2] /* Save the new top of stack. */
+ stmia r0!, {r4-r7} /* Store the low registers that are not saved automatically. */
+ mov r4, r8 /* Store the high registers. */
+ mov r5, r9
+ mov r6, r10
+ mov r7, r11
+ stmia r0!, {r4-r7}
- push {r3, r14}
- cpsid i
- bl vTaskSwitchContext
- cpsie i
- pop {r2, r3} /* lr goes in r3. r2 now holds tcb pointer. */
+ push {r3, r14}
+ cpsid i
+ bl vTaskSwitchContext
+ cpsie i
+ pop {r2, r3} /* lr goes in r3. r2 now holds tcb pointer. */
- ldr r1, [r2]
- ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. */
- adds r0, r0, #16 /* Move to the high registers. */
- ldmia r0!, {r4-r7} /* Pop the high registers. */
- mov r8, r4
- mov r9, r5
- mov r10, r6
- mov r11, r7
+ ldr r1, [r2]
+ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. */
+ adds r0, r0, #16 /* Move to the high registers. */
+ ldmia r0!, {r4-r7} /* Pop the high registers. */
+ mov r8, r4
+ mov r9, r5
+ mov r10, r6
+ mov r11, r7
- msr psp, r0 /* Remember the new top of stack for the task. */
+ msr psp, r0 /* Remember the new top of stack for the task. */
- subs r0, r0, #32 /* Go back for the low registers that are not automatically restored. */
- ldmia r0!, {r4-r7} /* Pop low registers. */
+ subs r0, r0, #32 /* Go back for the low registers that are not automatically restored. */
+ ldmia r0!, {r4-r7} /* Pop low registers. */
- bx r3
+ bx r3
/*-----------------------------------------------------------*/
vPortSVCHandler;
- /* This function is no longer used, but retained for backward
- compatibility. */
- bx lr
+ /* This function is no longer used, but retained for backward
+ compatibility. */
+ bx lr
/*-----------------------------------------------------------*/
vPortStartFirstTask
- /* The MSP stack is not reset as, unlike on M3/4 parts, there is no vector
- table offset register that can be used to locate the initial stack value.
- Not all M0 parts have the application vector table at address 0. */
+ /* The MSP stack is not reset as, unlike on M3/4 parts, there is no vector
+ table offset register that can be used to locate the initial stack value.
+ Not all M0 parts have the application vector table at address 0. */
- ldr r3, =pxCurrentTCB /* Obtain location of pxCurrentTCB. */
- ldr r1, [r3]
- ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- movs r0, #2 /* Switch to the psp stack. */
- msr CONTROL, r0
- isb
- pop {r0-r5} /* Pop the registers that are saved automatically. */
- mov lr, r5 /* lr is now in r5. */
- pop {r3} /* The return address is now in r3. */
- pop {r2} /* Pop and discard the XPSR. */
- cpsie i /* The first task has its context and interrupts can be enabled. */
- bx r3 /* Jump to the user defined task code. */
+ ldr r3, =pxCurrentTCB /* Obtain location of pxCurrentTCB. */
+ ldr r1, [r3]
+ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. */
+ adds r0, #32 /* Discard everything up to r0. */
+ msr psp, r0 /* This is now the new top of stack to use in the task. */
+ movs r0, #2 /* Switch to the psp stack. */
+ msr CONTROL, r0
+ isb
+ pop {r0-r5} /* Pop the registers that are saved automatically. */
+ mov lr, r5 /* lr is now in r5. */
+ pop {r3} /* The return address is now in r3. */
+ pop {r2} /* Pop and discard the XPSR. */
+ cpsie i /* The first task has its context and interrupts can be enabled. */
+ bx r3 /* Jump to the user defined task code. */
/*-----------------------------------------------------------*/
ulSetInterruptMaskFromISR
- mrs r0, PRIMASK
- cpsid i
- bx lr
+ mrs r0, PRIMASK
+ cpsid i
+ bx lr
/*-----------------------------------------------------------*/
vClearInterruptMaskFromISR
- msr PRIMASK, r0
- bx lr
+ msr PRIMASK, r0
+ bx lr
- END
+ END
diff --git a/Source/portable/IAR/ARM_CM0/portmacro.h b/Source/portable/IAR/ARM_CM0/portmacro.h
index d5f8f82..e0ccf6b 100644
--- a/Source/portable/IAR/ARM_CM0/portmacro.h
+++ b/Source/portable/IAR/ARM_CM0/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -26,12 +26,15 @@
*
*/
-#ifndef PORTMACRO_H
- #define PORTMACRO_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*-----------------------------------------------------------
* Port specific definitions.
@@ -44,85 +47,119 @@
*/
/* Type definitions. */
- #define portCHAR char
- #define portFLOAT float
- #define portDOUBLE double
- #define portLONG long
- #define portSHORT short
- #define portSTACK_TYPE uint32_t
- #define portBASE_TYPE long
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
- typedef portSTACK_TYPE StackType_t;
- typedef long BaseType_t;
- typedef unsigned long UBaseType_t;
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
- typedef uint16_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffff
- #else
- typedef uint32_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
- #define portTICK_TYPE_IS_ATOMIC 1
- #endif
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
/*-----------------------------------------------------------*/
/* Architecture specifics. */
- #define portSTACK_GROWTH ( -1 )
- #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
- #define portBYTE_ALIGNMENT 8
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
/*-----------------------------------------------------------*/
/* Scheduler utilities. */
- extern void vPortYield( void );
- #define portNVIC_INT_CTRL ( ( volatile uint32_t * ) 0xe000ed04 )
- #define portNVIC_PENDSVSET 0x10000000
- #define portYIELD() vPortYield()
- #define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) *( portNVIC_INT_CTRL ) = portNVIC_PENDSVSET
- #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+extern void vPortYield( void );
+#define portNVIC_INT_CTRL ( ( volatile uint32_t * ) 0xe000ed04 )
+#define portNVIC_PENDSVSET 0x10000000
+#define portYIELD() vPortYield()
+#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) *( portNVIC_INT_CTRL ) = portNVIC_PENDSVSET
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/* Critical section management. */
- extern void vPortEnterCritical( void );
- extern void vPortExitCritical( void );
- extern uint32_t ulSetInterruptMaskFromISR( void );
- extern void vClearInterruptMaskFromISR( uint32_t ulMask );
+extern void vPortEnterCritical( void );
+extern void vPortExitCritical( void );
+extern uint32_t ulSetInterruptMaskFromISR( void );
+extern void vClearInterruptMaskFromISR( uint32_t ulMask );
- #define portDISABLE_INTERRUPTS() __asm volatile ( "cpsid i" )
- #define portENABLE_INTERRUPTS() __asm volatile ( "cpsie i" )
- #define portENTER_CRITICAL() vPortEnterCritical()
- #define portEXIT_CRITICAL() vPortExitCritical()
- #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
- #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x )
+#define portDISABLE_INTERRUPTS() __asm volatile ( "cpsid i" )
+#define portENABLE_INTERRUPTS() __asm volatile ( "cpsie i" )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x )
/*-----------------------------------------------------------*/
/* Tickless idle/low power functionality. */
- #ifndef portSUPPRESS_TICKS_AND_SLEEP
- extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
- #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
- #endif
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. */
- #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portNOP()
+#define portNOP()
+
+#define portINLINE __inline
+
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+
+/*-----------------------------------------------------------*/
+
+portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void )
+{
+ uint32_t ulCurrentInterrupt;
+ BaseType_t xReturn;
+
+ /* Obtain the number of the currently executing interrupt. */
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+ if( ulCurrentInterrupt == 0 )
+ {
+ xReturn = pdFALSE;
+ }
+ else
+ {
+ xReturn = pdTRUE;
+ }
+
+ return xReturn;
+}
+
+/*-----------------------------------------------------------*/
/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in
* the source code because to do so would cause other compilers to generate
* warnings. */
- #pragma diag_suppress=Pa082
+#pragma diag_suppress=Pa082
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S b/Source/portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..4d805f3
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1407 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+#include "mpu_syscall_numbers.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0, r1}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskDelayUntil
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0, r1}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskAbortDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0, r1}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxTaskPriorityGet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0, r1}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_eTaskGetState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0, r1}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskGetInfo
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskSuspend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0, r1}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskResume
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGetTickCount
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_pcTaskGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_ulTaskGetRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0, r1}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxTaskGetSystemState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGetSchedulerState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskSetTimeOutState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0, r1}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskCheckForTimeOut
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyEntry
+MPU_xTaskGenericNotifyEntry:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGenericNotify
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWaitEntry
+MPU_xTaskGenericNotifyWaitEntry:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGenericNotifyWait
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueGenericSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0, r1}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxQueueMessagesWaiting
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxQueueSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0, r1}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueuePeek
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0, r1}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueSemaphoreTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueGetMutexHolder
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueTakeMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueGiveMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0, r1}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueSelectFromSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0, r1}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueAddToSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0, r1}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vQueueAddToRegistry
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0, r1}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vQueueUnregisterQueue
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_pcQueueGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0, r1}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_pvTimerGetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0, r1}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTimerSetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0, r1}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTimerIsTimerActive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommandEntry
+MPU_xTimerGenericCommandEntry:
+ push {r0, r1}
+ /* This function can be called from ISR also and therefore, we need a check
+ * to take privileged path, if called from ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTimerGenericCommand
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGenericCommandPrivImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_pcTimerGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTimerSetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTimerGetPeriod
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTimerGetExpiryTime
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBitsEntry
+MPU_xEventGroupWaitBitsEntry:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xEventGroupWaitBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xEventGroupClearBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xEventGroupSetBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xEventGroupSync
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0, r1}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxEventGroupGetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0, r1}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vEventGroupSetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferIsFull
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferIsEmpty
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferBytesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandPrivImpl
+MPU_xTimerGenericCommandPrivImpl:
+ b MPU_xTimerGenericCommandPrivImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/Source/portable/IAR/ARM_CM23/non_secure/port.c b/Source/portable/IAR/ARM_CM23/non_secure/port.c
index 349aeff..9712ac3 100644
--- a/Source/portable/IAR/ARM_CM23/non_secure/port.c
+++ b/Source/portable/IAR/ARM_CM23/non_secure/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
-/* MPU wrappers includes. */
+/* MPU includes. */
#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@@ -95,6 +96,26 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -111,6 +132,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -135,6 +164,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -178,6 +209,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -299,6 +354,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -352,8 +420,67 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
/**
* @brief Each task maintains its own interrupt status in the critical nesting
* variable.
@@ -369,6 +496,19 @@
PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
#endif /* configENABLE_TRUSTZONE */
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
#if ( configUSE_TICKLESS_IDLE == 1 )
/**
@@ -656,10 +796,29 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@@ -827,9 +986,8 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@@ -839,7 +997,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -854,7 +1012,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -925,18 +1083,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -944,131 +1102,546 @@
}
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Store the value of the PSPLIM register before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the privileged code (i.e. application is not
+ * raising SVC directly). This SVC is only raised from
+ * vRequestSystemCallExit which is in the privileged code section.
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+ * this means that we previously entered a system call and the
+ * application is not attempting to exit without entering a system
+ * call.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+ {
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Return to the caller of the System Call entry point (i.e. the
+ * caller of the MPU_<API>). */
+ pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ /* Ensure that LR has a valid value. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* Restore the PSPLIM register to what it was at the time of
+ * system call entry. */
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
+
+ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+ }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+ }
+
+#else /* configENABLE_MPU */
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
+ {
+ /* Simulate the stack frame as it would be created by a context switch
+ * interrupt. */
+ #if ( portPRELOAD_REGISTERS == 0 )
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
+ *pxTopOfStack = portINITIAL_EXC_RETURN;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #else /* portPRELOAD_REGISTERS */
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+ pxTopOfStack--;
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #endif /* portPRELOAD_REGISTERS */
+
+ return pxTopOfStack;
+ }
+
#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
-{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- #if ( portPRELOAD_REGISTERS == 0 )
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #else /* portPRELOAD_REGISTERS */
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #endif /* portPRELOAD_REGISTERS */
-
- return pxTopOfStack;
-}
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
{
+ #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+ {
+ volatile uint32_t ulOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine the maximum priority from which ISR safe FreeRTOS API
+ * functions can be called. ISR safe functions are those that end in
+ * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+ * ensure interrupt entry is as fast and simple as possible.
+ *
+ * Save the interrupt priority value that is about to be clobbered. */
+ ulOriginalPriority = portNVIC_SHPR2_REG;
+
+ /* Determine the number of priority bits available. First write to all
+ * possible bits. */
+ portNVIC_SHPR2_REG = 0xFF000000;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+ /* Use the same mask on the maximum system call priority. */
+ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+ /* Calculate the maximum acceptable priority group value for the number
+ * of bits read back. */
+
+ while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+ {
+ ulImplementedPrioBits++;
+ ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+ }
+
+ if( ulImplementedPrioBits == 8 )
+ {
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
+ }
+ else
+ {
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+ }
+
+ /* Shift the priority group value back to its position within the AIRCR
+ * register. */
+ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+ ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+ /* Restore the clobbered interrupt priority register to its original
+ * value. */
+ portNVIC_SHPR2_REG = ulOriginalPriority;
+ }
+ #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
/* Make PendSV, CallSV and SysTick the same priority as the kernel. */
portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
@@ -1087,6 +1660,12 @@
/* Initialize the critical nesting count ready for the first task. */
ulCriticalNesting = 0;
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+ {
+ xSchedulerRunning = pdTRUE;
+ }
+ #endif
+
/* Start the first task. */
vStartFirstTask();
@@ -1122,7 +1701,6 @@
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;
@@ -1237,6 +1815,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
@@ -1259,3 +1885,159 @@
return xReturn;
}
/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ void vPortValidateInterruptPriority( void )
+ {
+ uint32_t ulCurrentInterrupt;
+ uint8_t ucCurrentPriority;
+
+ /* Obtain the number of the currently executing interrupt. */
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+ /* Is the interrupt number a user defined interrupt? */
+ if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+ {
+ /* Look up the interrupt's priority. */
+ ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+ /* The following assertion will fail if a service routine (ISR) for
+ * an interrupt that has been assigned a priority above
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+ * function. ISR safe FreeRTOS API functions must *only* be called
+ * from interrupts that have been assigned a priority at or below
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Numerically low interrupt priority numbers represent logically high
+ * interrupt priorities, therefore the priority of the interrupt must
+ * be set to a value equal to or numerically *higher* than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
+ * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+ * and therefore also guaranteed to be invalid.
+ *
+ * FreeRTOS maintains separate thread and ISR API functions to ensure
+ * interrupt entry is as fast and simple as possible.
+ *
+ * The following links provide detailed information:
+ * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+ * https://www.FreeRTOS.org/FAQHelp.html */
+ configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+ }
+
+ /* Priority grouping: The interrupt controller (NVIC) allows the bits
+ * that define each interrupt's priority to be split between bits that
+ * define the interrupt's pre-emption priority bits and bits that define
+ * the interrupt's sub-priority. For simplicity all bits must be defined
+ * to be pre-emption priority bits. The following assertion will fail if
+ * this is not the case (if some bits represent a sub-priority).
+ *
+ * If the application only uses CMSIS libraries for interrupt
+ * configuration then the correct setting can be achieved on all Cortex-M
+ * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+ * scheduler. Note however that some vendor specific peripheral libraries
+ * assume a non-zero priority group setting, in which cases using a value
+ * of zero will result in unpredictable behaviour. */
+ configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+ }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings;
+
+ if( xSchedulerRunning == pdFALSE )
+ {
+ /* Grant access to all the kernel objects before the scheduler
+ * is started. It is necessary because there is no task running
+ * yet and therefore, we cannot use the permissions of any
+ * task. */
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+
+ #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ ( void ) lInternalIndexOfKernelObject;
+
+ /* If Access Control List feature is not used, all the tasks have
+ * access to all the kernel objects. */
+ return pdTRUE;
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM23/non_secure/portasm.h b/Source/portable/IAR/ARM_CM23/non_secure/portasm.h
index 93606b1..f64ceb5 100644
--- a/Source/portable/IAR/ARM_CM23/non_secure/portasm.h
+++ b/Source/portable/IAR/ARM_CM23/non_secure/portasm.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM23/non_secure/portasm.s b/Source/portable/IAR/ARM_CM23/non_secure/portasm.s
index 6e658f8..3c17889 100644
--- a/Source/portable/IAR/ARM_CM23/non_secure/portasm.s
+++ b/Source/portable/IAR/ARM_CM23/non_secure/portasm.s
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -33,27 +33,38 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
- EXTERN pxCurrentTCB
- EXTERN xSecureContext
- EXTERN vTaskSwitchContext
- EXTERN vPortSVCHandler_C
- EXTERN SecureContext_SaveContext
- EXTERN SecureContext_LoadContext
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
- PUBLIC xIsPrivileged
- PUBLIC vResetPrivilege
- PUBLIC vPortAllocateSecureContext
- PUBLIC vRestoreContextOfFirstTask
- PUBLIC vRaisePrivilege
- PUBLIC vStartFirstTask
- PUBLIC ulSetInterruptMask
- PUBLIC vClearInterruptMask
- PUBLIC PendSV_Handler
- PUBLIC SVC_Handler
- PUBLIC vPortFreeSecureContext
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+ EXTERN pxCurrentTCB
+ EXTERN xSecureContext
+ EXTERN vTaskSwitchContext
+ EXTERN vPortSVCHandler_C
+ EXTERN SecureContext_SaveContext
+ EXTERN SecureContext_LoadContext
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallExit
+#endif
+
+ PUBLIC xIsPrivileged
+ PUBLIC vResetPrivilege
+ PUBLIC vPortAllocateSecureContext
+ PUBLIC vRestoreContextOfFirstTask
+ PUBLIC vRaisePrivilege
+ PUBLIC vStartFirstTask
+ PUBLIC ulSetInterruptMask
+ PUBLIC vClearInterruptMask
+ PUBLIC PendSV_Handler
+ PUBLIC SVC_Handler
+ PUBLIC vPortFreeSecureContext
#if ( configENABLE_FPU == 1 )
- #error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
+ #error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
#endif
/*-----------------------------------------------------------*/
@@ -61,331 +72,457 @@
/*-----------------------------------------------------------*/
- SECTION .text:CODE:NOROOT(2)
- THUMB
+ SECTION .text:CODE:NOROOT(2)
+ THUMB
/*-----------------------------------------------------------*/
xIsPrivileged:
- mrs r0, control /* r0 = CONTROL. */
- movs r1, #1 /* r1 = 1. */
- tst r0, r1 /* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
- beq running_privileged /* If the result of previous AND operation was 0, branch. */
- movs r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
- bx lr /* Return. */
- running_privileged:
- movs r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
- bx lr /* Return. */
+ mrs r0, control /* r0 = CONTROL. */
+ movs r1, #1 /* r1 = 1. */
+ tst r0, r1 /* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
+ beq running_privileged /* If the result of previous AND operation was 0, branch. */
+ movs r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ bx lr /* Return. */
+ running_privileged:
+ movs r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
vResetPrivilege:
- mrs r0, control /* r0 = CONTROL. */
- movs r1, #1 /* r1 = 1. */
- orrs r0, r1 /* r0 = r0 | r1. */
- msr control, r0 /* CONTROL = r0. */
- bx lr /* Return to the caller. */
+ mrs r0, control /* r0 = CONTROL. */
+ movs r1, #1 /* r1 = 1. */
+ orrs r0, r1 /* r0 = r0 | r1. */
+ msr control, r0 /* CONTROL = r0. */
+ bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
vPortAllocateSecureContext:
- svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */
- bx lr /* Return. */
+ svc 100 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 100. */
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
/*----------------- Privileged Functions --------------------*/
/*-----------------------------------------------------------*/
- SECTION privileged_functions:CODE:NOROOT(2)
- THUMB
+ SECTION privileged_functions:CODE:NOROOT(2)
+ THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
vRestoreContextOfFirstTask:
- ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r3, [r2] /* Read pxCurrentTCB. */
- ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- movs r5, #1 /* r5 = 1. */
- bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
- adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- movs r5, #4 /* r5 = 4. */
- str r5, [r2] /* Program RNR = 4. */
- ldmia r3!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */
- ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */
- stmia r4!, {r6,r7} /* Write first set of RBAR/RLAR registers. */
- movs r5, #5 /* r5 = 5. */
- str r5, [r2] /* Program RNR = 5. */
- ldmia r3!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */
- ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */
- stmia r4!, {r6,r7} /* Write second set of RBAR/RLAR registers. */
- movs r5, #6 /* r5 = 6. */
- str r5, [r2] /* Program RNR = 6. */
- ldmia r3!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */
- ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */
- stmia r4!, {r6,r7} /* Write third set of RBAR/RLAR registers. */
- movs r5, #7 /* r5 = 7. */
- str r5, [r2] /* Program RNR = 7. */
- ldmia r3!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */
- ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */
- stmia r4!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- movs r5, #1 /* r5 = 1. */
- orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- ldr r5, =xSecureContext
- str r1, [r5] /* Set xSecureContext to this task's value for the same. */
- msr psplim, r2 /* Set this task's PSPLIM value. */
- msr control, r3 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- bx r4 /* Finally, branch to EXC_RETURN. */
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ subs r2, #20
+ ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */
+ subs r2, #20
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ mov lr, r6
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+
+ restore_general_regs_first_task:
+ subs r2, #32
+ ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r3!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */
+ ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */
+ stmia r3!, {r4-r7} /* Copy rest half of the hardware saved context on the task stack. */
+ subs r2, #48
+ ldmia r2!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r2, #32
+ ldmia r2!, {r4-r7} /* Restore r4-r7. */
+ subs r2, #16
+
+ restore_context_done_first_task:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
#else /* configENABLE_MPU */
- ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
- ldr r4, =xSecureContext
- str r1, [r4] /* Set xSecureContext to this task's value for the same. */
- msr psplim, r2 /* Set this task's PSPLIM value. */
- movs r1, #2 /* r1 = 2. */
- msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- bx r3 /* Finally, branch to EXC_RETURN. */
+
+vRestoreContextOfFirstTask:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r3, [r2] /* Read pxCurrentTCB. */
+ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+
+ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ ldr r4, =xSecureContext
+ str r1, [r4] /* Set xSecureContext to this task's value for the same. */
+ msr psplim, r2 /* Set this task's PSPLIM value. */
+ movs r1, #2 /* r1 = 2. */
+ msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
+ adds r0, #32 /* Discard everything up to r0. */
+ msr psp, r0 /* This is now the new top of stack to use in the task. */
+ isb
+ bx r3 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
vRaisePrivilege:
- mrs r0, control /* Read the CONTROL register. */
- movs r1, #1 /* r1 = 1. */
- bics r0, r1 /* Clear the bit 0. */
- msr control, r0 /* Write back the new CONTROL value. */
- bx lr /* Return to the caller. */
+ mrs r0, control /* Read the CONTROL register. */
+ movs r1, #1 /* r1 = 1. */
+ bics r0, r1 /* Clear the bit 0. */
+ msr control, r0 /* Write back the new CONTROL value. */
+ bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
vStartFirstTask:
- ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
- ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
- ldr r0, [r0] /* The first entry in vector table is stack pointer. */
- msr msp, r0 /* Set the MSP back to the start of the stack. */
- cpsie i /* Globally enable interrupts. */
- dsb
- isb
- svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
+ ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
+ ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
+ ldr r0, [r0] /* The first entry in vector table is stack pointer. */
+ msr msp, r0 /* Set the MSP back to the start of the stack. */
+ cpsie i /* Globally enable interrupts. */
+ dsb
+ isb
+ svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
/*-----------------------------------------------------------*/
ulSetInterruptMask:
- mrs r0, PRIMASK
- cpsid i
- bx lr
+ mrs r0, PRIMASK
+ cpsid i
+ bx lr
/*-----------------------------------------------------------*/
vClearInterruptMask:
- msr PRIMASK, r0
- bx lr
+ msr PRIMASK, r0
+ bx lr
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
PendSV_Handler:
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
- mrs r2, psp /* Read PSP in r2. */
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */
- cbz r0, save_ns_context /* No secure context to save. */
- push {r0-r2, r14}
- bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r0-r3} /* LR is now in r3. */
- mov lr, r3 /* LR = r3. */
- lsls r1, r3, #25 /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
+ cbz r0, save_ns_context /* No secure context to save. */
+ save_s_context:
+ push {r0-r2, lr}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r3} /* LR is now in r3. */
+ mov lr, r3 /* Restore LR. */
+
+ save_ns_context:
+ mov r3, lr /* r3 = LR (EXC_RETURN). */
+ lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ save_general_regs:
+ mrs r3, psp
+ stmia r2!, {r4-r7} /* Store r4-r7. */
+ mov r4, r8 /* r4 = r8. */
+ mov r5, r9 /* r5 = r9. */
+ mov r6, r10 /* r6 = r10. */
+ mov r7, r11 /* r7 = r11. */
+ stmia r2!, {r4-r7} /* Store r8-r11. */
+ ldmia r3!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */
+ stmia r2!, {r4-r7} /* Store the hardware saved context. */
+ ldmia r3!, {r4-r7} /* Copy rest half of the hardware saved context into r4-r7. */
+ stmia r2!, {r4-r7} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psp /* r3 = PSP. */
+ mrs r4, psplim /* r4 = PSPLIM. */
+ mrs r5, control /* r5 = CONTROL. */
+ mov r6, lr /* r6 = LR. */
+ stmia r2!, {r0, r3-r6} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ cpsid i
+ bl vTaskSwitchContext
+ cpsie i
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ subs r2, #20
+ ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */
+ subs r2, #20
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ mov lr, r6
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+ cbz r0, restore_ns_context /* No secure context to restore. */
+
+ restore_s_context:
+ push {r1-r3, lr}
+ bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r1-r4} /* LR is now in r4. */
+ mov lr, r4
+
+ restore_ns_context:
+ mov r0, lr /* r0 = LR (EXC_RETURN). */
+ lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ restore_general_regs:
+ subs r2, #32
+ ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r3!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */
+ ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */
+ stmia r3!, {r4-r7} /* Copy rest half of the hardware saved context on the task stack. */
+ subs r2, #48
+ ldmia r2!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r2, #32
+ ldmia r2!, {r4-r7} /* Restore r4-r7. */
+ subs r2, #16
+
+ restore_context_done:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
#else /* configENABLE_MPU */
- subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
+
+PendSV_Handler:
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ mrs r2, psp /* Read PSP in r2. */
+
+ cbz r0, save_ns_context /* No secure context to save. */
+ push {r0-r2, r14}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r3} /* LR is now in r3. */
+ mov lr, r3 /* LR = r3. */
+ lsls r1, r3, #25 /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+
+ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ str r2, [r1] /* Save the new top of stack in TCB. */
+ mrs r1, psplim /* r1 = PSPLIM. */
+ mov r3, lr /* r3 = LR/EXC_RETURN. */
+ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
+
+ b select_next_task
+
+ save_ns_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ str r2, [r1] /* Save the new top of stack in TCB. */
+ mrs r1, psplim /* r1 = PSPLIM. */
+ mov r3, lr /* r3 = LR/EXC_RETURN. */
+ stmia r2!, {r0, r1, r3-r7} /* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
+ mov r4, r8 /* r4 = r8. */
+ mov r5, r9 /* r5 = r9. */
+ mov r6, r10 /* r6 = r10. */
+ mov r7, r11 /* r7 = r11. */
+ stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */
+
+ select_next_task:
+ cpsid i
+ bl vTaskSwitchContext
+ cpsie i
+
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
+
+ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ msr psplim, r1 /* Restore the PSPLIM register value for the task. */
+ mov lr, r4 /* LR = r4. */
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r3] /* Restore the task's xSecureContext. */
+ cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ push {r2, r4}
+ bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r2, r4}
+ mov lr, r4 /* LR = r4. */
+ lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ msr psp, r2 /* Remember the new top of stack for the task. */
+ bx lr
+
+ restore_ns_context:
+ adds r2, r2, #16 /* Move to the high registers. */
+ ldmia r2!, {r4-r7} /* Restore the high registers that are not automatically restored. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ msr psp, r2 /* Remember the new top of stack for the task. */
+ subs r2, r2, #32 /* Go back to the low registers. */
+ ldmia r2!, {r4-r7} /* Restore the low registers that are not automatically restored. */
+ bx lr
+
#endif /* configENABLE_MPU */
- b select_next_task
-
- save_ns_context:
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- #if ( configENABLE_MPU == 1 )
- subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- adds r2, r2, #16 /* r2 = r2 + 16. */
- stmia r2!, {r4-r7} /* Store the low registers that are not saved automatically. */
- mov r4, r8 /* r4 = r8. */
- mov r5, r9 /* r5 = r9. */
- mov r6, r10 /* r6 = r10. */
- mov r7, r11 /* r7 = r11. */
- stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- subs r2, r2, #48 /* r2 = r2 - 48. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmia r2!, {r0, r1, r3-r7} /* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
- mov r4, r8 /* r4 = r8. */
- mov r5, r9 /* r5 = r9. */
- mov r6, r10 /* r6 = r10. */
- mov r7, r11 /* r7 = r11. */
- stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */
- #endif /* configENABLE_MPU */
-
- select_next_task:
- cpsid i
- bl vTaskSwitchContext
- cpsie i
-
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
-
- #if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- movs r5, #1 /* r5 = 1. */
- bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
- str r4, [r3] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r3] /* Program MAIR0. */
- ldr r4, =0xe000ed98 /* r4 = 0xe000ed98 [Location of RNR]. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- movs r5, #4 /* r5 = 4. */
- str r5, [r4] /* Program RNR = 4. */
- ldmia r1!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r6,r7} /* Write first set of RBAR/RLAR registers. */
- movs r5, #5 /* r5 = 5. */
- str r5, [r4] /* Program RNR = 5. */
- ldmia r1!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r6,r7} /* Write second set of RBAR/RLAR registers. */
- movs r5, #6 /* r5 = 6. */
- str r5, [r4] /* Program RNR = 6. */
- ldmia r1!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r6,r7} /* Write third set of RBAR/RLAR registers. */
- movs r5, #7 /* r5 = 7. */
- str r5, [r4] /* Program RNR = 7. */
- ldmia r1!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */
-
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- movs r5, #1 /* r5 = 1. */
- orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
- str r4, [r3] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
-
- #if ( configENABLE_MPU == 1 )
- ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r3 /* Restore the CONTROL register value for the task. */
- mov lr, r4 /* LR = r4. */
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- str r0, [r3] /* Restore the task's xSecureContext. */
- cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- push {r2, r4}
- bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r2, r4}
- mov lr, r4 /* LR = r4. */
- lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- msr psp, r2 /* Remember the new top of stack for the task. */
- bx lr
- #else /* configENABLE_MPU */
- ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- mov lr, r4 /* LR = r4. */
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- str r0, [r3] /* Restore the task's xSecureContext. */
- cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- push {r2, r4}
- bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r2, r4}
- mov lr, r4 /* LR = r4. */
- lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- msr psp, r2 /* Remember the new top of stack for the task. */
- bx lr
- #endif /* configENABLE_MPU */
-
- restore_ns_context:
- adds r2, r2, #16 /* Move to the high registers. */
- ldmia r2!, {r4-r7} /* Restore the high registers that are not automatically restored. */
- mov r8, r4 /* r8 = r4. */
- mov r9, r5 /* r9 = r5. */
- mov r10, r6 /* r10 = r6. */
- mov r11, r7 /* r11 = r7. */
- msr psp, r2 /* Remember the new top of stack for the task. */
- subs r2, r2, #32 /* Go back to the low registers. */
- ldmia r2!, {r4-r7} /* Restore the low registers that are not automatically restored. */
- bx lr
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
SVC_Handler:
- movs r0, #4
- mov r1, lr
- tst r0, r1
- beq stacking_used_msp
- mrs r0, psp
- b vPortSVCHandler_C
- stacking_used_msp:
- mrs r0, msp
- b vPortSVCHandler_C
+ movs r0, #4
+ mov r1, lr
+ tst r0, r1
+ beq stack_on_msp
+ stack_on_psp:
+ mrs r0, psp
+ b route_svc
+ stack_on_msp:
+ mrs r0, msp
+ b route_svc
+
+ route_svc:
+ ldr r3, [r0, #24]
+ subs r3, #2
+ ldrb r2, [r3, #0]
+ cmp r2, #NUM_SYSTEM_CALLS
+ blt system_call_enter
+ cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq system_call_exit
+ b vPortSVCHandler_C
+
+ system_call_enter:
+ b vSystemCallEnter
+ system_call_exit:
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+SVC_Handler:
+ movs r0, #4
+ mov r1, lr
+ tst r0, r1
+ beq stacking_used_msp
+ mrs r0, psp
+ b vPortSVCHandler_C
+ stacking_used_msp:
+ mrs r0, msp
+ b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
vPortFreeSecureContext:
- ldr r2, [r0] /* The first item in the TCB is the top of the stack. */
- ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */
- cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */
- bne free_secure_context /* Branch if r1 != 0. */
- bx lr /* There is no secure context (xSecureContext is NULL). */
- free_secure_context:
- svc 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */
- bx lr /* Return. */
+ ldr r2, [r0] /* The first item in the TCB is the top of the stack. */
+ ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */
+ cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */
+ bne free_secure_context /* Branch if r1 != 0. */
+ bx lr /* There is no secure context (xSecureContext is NULL). */
+ free_secure_context:
+ svc 101 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 101. */
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
- END
+ END
diff --git a/Source/portable/IAR/ARM_CM23/non_secure/portmacro.h b/Source/portable/IAR/ARM_CM23/non_secure/portmacro.h
index f31bd3f..19d7556 100644
--- a/Source/portable/IAR/ARM_CM23/non_secure/portmacro.h
+++ b/Source/portable/IAR/ARM_CM23/non_secure/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -29,11 +29,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
+/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
-
-#include "portmacrocommon.h"
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -48,11 +48,16 @@
/**
* Architecture specifics.
*/
-#define portARCH_NAME "Cortex-M23"
-#define portDONT_DISCARD __root
+#define portARCH_NAME "Cortex-M23"
+#define portHAS_BASEPRI 0
+#define portDONT_DISCARD __root
/*-----------------------------------------------------------*/
-#if( configTOTAL_MPU_REGIONS == 16 )
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
+#if ( configTOTAL_MPU_REGIONS == 16 )
#error 16 MPU regions are not yet supported for this port.
#endif
/*-----------------------------------------------------------*/
@@ -60,8 +65,8 @@
/**
* @brief Critical section management.
*/
-#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
-#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
+#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
+#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
/*-----------------------------------------------------------*/
/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in
@@ -71,8 +76,10 @@
#pragma diag_suppress=Pa082
/*-----------------------------------------------------------*/
+/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h b/Source/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h
index e68692a..6f666da 100644
--- a/Source/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h
+++ b/Source/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -27,11 +27,13 @@
*/
#ifndef PORTMACROCOMMON_H
- #define PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -43,209 +45,329 @@
*------------------------------------------------------------------------------
*/
- #ifndef configENABLE_FPU
- #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
- #endif /* configENABLE_FPU */
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
- #ifndef configENABLE_MPU
- #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
- #endif /* configENABLE_MPU */
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
- #ifndef configENABLE_TRUSTZONE
- #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
- #endif /* configENABLE_TRUSTZONE */
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
- #define portCHAR char
- #define portFLOAT float
- #define portDOUBLE double
- #define portLONG long
- #define portSHORT short
- #define portSTACK_TYPE uint32_t
- #define portBASE_TYPE long
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
- typedef portSTACK_TYPE StackType_t;
- typedef long BaseType_t;
- typedef unsigned long UBaseType_t;
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
- typedef uint16_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffff
- #else
- typedef uint32_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
- #define portTICK_TYPE_IS_ATOMIC 1
- #endif
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
- #define portSTACK_GROWTH ( -1 )
- #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
- #define portBYTE_ALIGNMENT 8
- #define portNOP()
- #define portINLINE __inline
- #ifndef portFORCE_INLINE
- #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
- #endif
- #define portHAS_STACK_OVERFLOW_CHECKING 1
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
* @brief Extern declarations.
*/
- extern BaseType_t xPortIsInsideInterrupt( void );
+extern BaseType_t xPortIsInsideInterrupt( void );
- extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- #if ( configENABLE_TRUSTZONE == 1 )
- extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
- extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
- #endif /* configENABLE_TRUSTZONE */
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
- #if ( configENABLE_MPU == 1 )
- extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
- extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/**
* @brief MPU specific constants.
*/
- #if ( configENABLE_MPU == 1 )
- #define portUSING_MPU_WRAPPERS 1
- #define portPRIVILEGE_BIT ( 0x80000000UL )
- #else
- #define portPRIVILEGE_BIT ( 0x0UL )
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
/* MPU settings that can be overriden in FreeRTOSConfig.h. */
#ifndef configTOTAL_MPU_REGIONS
/* Define to 8 for backward compatibility. */
- #define configTOTAL_MPU_REGIONS ( 8UL )
+ #define configTOTAL_MPU_REGIONS ( 8UL )
#endif
/* MPU regions. */
- #define portPRIVILEGED_FLASH_REGION ( 0UL )
- #define portUNPRIVILEGED_FLASH_REGION ( 1UL )
- #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
- #define portPRIVILEGED_RAM_REGION ( 3UL )
- #define portSTACK_REGION ( 4UL )
- #define portFIRST_CONFIGURABLE_REGION ( 5UL )
- #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
- #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
- #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
- * 01 --> Device-nGnRE
- * 10 --> Device-nGRE
- * 11 --> Device-GRE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
- #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
- #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
- #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
- #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
- #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
- #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
/* Attributes used in MPU_RBAR registers. */
- #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
- #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
- #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
- #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
- #define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
- #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
- #define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
- #define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
typedef struct MPURegionSettings
{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
-/**
- * @brief MPU settings as stored in the TCB.
- */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
} xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
* @brief SVC numbers.
*/
- #define portSVC_ALLOCATE_SECURE_CONTEXT 0
- #define portSVC_FREE_SECURE_CONTEXT 1
- #define portSVC_START_SCHEDULER 2
- #define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
- #define portYIELD() vPortYield()
- #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
- #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
- #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 )
- #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/**
* @brief Critical section management.
*/
- #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
- #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
- #define portENTER_CRITICAL() vPortEnterCritical()
- #define portEXIT_CRITICAL() vPortExitCritical()
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/**
* @brief Tickless idle/low power functionality.
*/
- #ifndef portSUPPRESS_TICKS_AND_SLEEP
- extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
- #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
- #endif
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
/*-----------------------------------------------------------*/
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
- #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/*-----------------------------------------------------------*/
- #if ( configENABLE_TRUSTZONE == 1 )
+#if ( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
@@ -256,7 +378,7 @@
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
- #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
@@ -264,18 +386,18 @@
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
- #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
- #endif /* configENABLE_TRUSTZONE */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
- #if ( configENABLE_MPU == 1 )
+#if ( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
- #define portIS_PRIVILEGED() xIsPrivileged()
+ #define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
@@ -284,28 +406,44 @@
* then it raises the privilege. If this is called from any other place,
* the privilege is not raised.
*/
- #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting the bit 0 of the CONTROL
* register.
*/
- #define portRESET_PRIVILEGE() vResetPrivilege()
- #else
- #define portIS_PRIVILEGED()
- #define portRAISE_PRIVILEGE()
- #define portRESET_PRIVILEGE()
- #endif /* configENABLE_MPU */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
* @brief Barriers.
*/
- #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
/*-----------------------------------------------------------*/
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/IAR/ARM_CM23/secure/secure_context.c b/Source/portable/IAR/ARM_CM23/secure/secure_context.c
index 1996693..e37dd96 100644
--- a/Source/portable/IAR/ARM_CM23/secure/secure_context.c
+++ b/Source/portable/IAR/ARM_CM23/secure/secure_context.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM23/secure/secure_context.h b/Source/portable/IAR/ARM_CM23/secure/secure_context.h
index de33d15..2220ea6 100644
--- a/Source/portable/IAR/ARM_CM23/secure/secure_context.h
+++ b/Source/portable/IAR/ARM_CM23/secure/secure_context.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM23/secure/secure_context_port_asm.s b/Source/portable/IAR/ARM_CM23/secure/secure_context_port_asm.s
index aee841a..1e4b3a5 100644
--- a/Source/portable/IAR/ARM_CM23/secure/secure_context_port_asm.s
+++ b/Source/portable/IAR/ARM_CM23/secure/secure_context_port_asm.s
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM23/secure/secure_heap.c b/Source/portable/IAR/ARM_CM23/secure/secure_heap.c
index b3bf007..19f7c23 100644
--- a/Source/portable/IAR/ARM_CM23/secure/secure_heap.c
+++ b/Source/portable/IAR/ARM_CM23/secure/secure_heap.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM23/secure/secure_heap.h b/Source/portable/IAR/ARM_CM23/secure/secure_heap.h
index e469f2c..75c9cb0 100644
--- a/Source/portable/IAR/ARM_CM23/secure/secure_heap.h
+++ b/Source/portable/IAR/ARM_CM23/secure/secure_heap.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM23/secure/secure_init.c b/Source/portable/IAR/ARM_CM23/secure/secure_init.c
index f6570d8..f93bfce 100644
--- a/Source/portable/IAR/ARM_CM23/secure/secure_init.c
+++ b/Source/portable/IAR/ARM_CM23/secure/secure_init.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM23/secure/secure_init.h b/Source/portable/IAR/ARM_CM23/secure/secure_init.h
index e89af71..e6c9da0 100644
--- a/Source/portable/IAR/ARM_CM23/secure/secure_init.h
+++ b/Source/portable/IAR/ARM_CM23/secure/secure_init.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM23/secure/secure_port_macros.h b/Source/portable/IAR/ARM_CM23/secure/secure_port_macros.h
index 2fb7c59..d7ac583 100644
--- a/Source/portable/IAR/ARM_CM23/secure/secure_port_macros.h
+++ b/Source/portable/IAR/ARM_CM23/secure/secure_port_macros.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S b/Source/portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..4d805f3
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1407 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+#include "mpu_syscall_numbers.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0, r1}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskDelayUntil
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0, r1}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskAbortDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0, r1}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxTaskPriorityGet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0, r1}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_eTaskGetState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0, r1}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskGetInfo
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskSuspend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0, r1}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskResume
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGetTickCount
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_pcTaskGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_ulTaskGetRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0, r1}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxTaskGetSystemState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGetSchedulerState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTaskSetTimeOutState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0, r1}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskCheckForTimeOut
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyEntry
+MPU_xTaskGenericNotifyEntry:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGenericNotify
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWaitEntry
+MPU_xTaskGenericNotifyWaitEntry:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGenericNotifyWait
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueGenericSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0, r1}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxQueueMessagesWaiting
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxQueueSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0, r1}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueuePeek
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0, r1}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueSemaphoreTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueGetMutexHolder
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueTakeMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueGiveMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0, r1}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueSelectFromSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0, r1}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xQueueAddToSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0, r1}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vQueueAddToRegistry
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0, r1}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vQueueUnregisterQueue
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_pcQueueGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0, r1}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_pvTimerGetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0, r1}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTimerSetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0, r1}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTimerIsTimerActive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommandEntry
+MPU_xTimerGenericCommandEntry:
+ push {r0, r1}
+ /* This function can be called from ISR also and therefore, we need a check
+ * to take privileged path, if called from ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTimerGenericCommand
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGenericCommandPrivImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_pcTimerGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vTimerSetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTimerGetPeriod
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xTimerGetExpiryTime
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBitsEntry
+MPU_xEventGroupWaitBitsEntry:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xEventGroupWaitBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xEventGroupClearBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xEventGroupSetBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xEventGroupSync
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0, r1}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_uxEventGroupGetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0, r1}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_vEventGroupSetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferIsFull
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferIsEmpty
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferBytesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0, r1}
+ svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandPrivImpl
+MPU_xTimerGenericCommandPrivImpl:
+ b MPU_xTimerGenericCommandPrivImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/Source/portable/IAR/ARM_CM23_NTZ/non_secure/port.c b/Source/portable/IAR/ARM_CM23_NTZ/non_secure/port.c
index 349aeff..9712ac3 100644
--- a/Source/portable/IAR/ARM_CM23_NTZ/non_secure/port.c
+++ b/Source/portable/IAR/ARM_CM23_NTZ/non_secure/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
-/* MPU wrappers includes. */
+/* MPU includes. */
#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@@ -95,6 +96,26 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -111,6 +132,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -135,6 +164,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -178,6 +209,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lies within [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -299,6 +354,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -352,8 +420,67 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
/**
* @brief Each task maintains its own interrupt status in the critical nesting
* variable.
@@ -369,6 +496,19 @@
PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
#endif /* configENABLE_TRUSTZONE */
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
#if ( configUSE_TICKLESS_IDLE == 1 )
/**
@@ -656,10 +796,29 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@@ -827,9 +986,8 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@@ -839,7 +997,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -854,7 +1012,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -925,18 +1083,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -944,131 +1102,546 @@
}
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of being
+             * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                            " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Store the value of the PSPLIM register before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of being
+             * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the privileged code (i.e. application is not
+ * raising SVC directly). This SVC is only raised from
+ * vRequestSystemCallExit which is in the privileged code section.
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+ * this means that we previously entered a system call and the
+ * application is not attempting to exit without entering a system
+ * call.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+ {
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                            " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Return to the caller of the System Call entry point (i.e. the
+ * caller of the MPU_<API>). */
+ pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+            /* Ensure that LR has a valid value. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* Restore the PSPLIM register to what it was at the time of
+ * system call entry. */
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
+
+ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+ }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+ }
+
+#else /* configENABLE_MPU */
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
+ {
+ /* Simulate the stack frame as it would be created by a context switch
+ * interrupt. */
+ #if ( portPRELOAD_REGISTERS == 0 )
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
+ *pxTopOfStack = portINITIAL_EXC_RETURN;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #else /* portPRELOAD_REGISTERS */
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+ pxTopOfStack--;
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #endif /* portPRELOAD_REGISTERS */
+
+ return pxTopOfStack;
+ }
+
#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
-{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- #if ( portPRELOAD_REGISTERS == 0 )
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #else /* portPRELOAD_REGISTERS */
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #endif /* portPRELOAD_REGISTERS */
-
- return pxTopOfStack;
-}
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
{
+ #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+ {
+ volatile uint32_t ulOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine the maximum priority from which ISR safe FreeRTOS API
+ * functions can be called. ISR safe functions are those that end in
+ * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+ * ensure interrupt entry is as fast and simple as possible.
+ *
+ * Save the interrupt priority value that is about to be clobbered. */
+ ulOriginalPriority = portNVIC_SHPR2_REG;
+
+ /* Determine the number of priority bits available. First write to all
+ * possible bits. */
+ portNVIC_SHPR2_REG = 0xFF000000;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+ /* Use the same mask on the maximum system call priority. */
+ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+ /* Calculate the maximum acceptable priority group value for the number
+ * of bits read back. */
+
+ while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+ {
+ ulImplementedPrioBits++;
+ ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+ }
+
+ if( ulImplementedPrioBits == 8 )
+ {
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
+ }
+ else
+ {
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+ }
+
+ /* Shift the priority group value back to its position within the AIRCR
+ * register. */
+ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+ ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+ /* Restore the clobbered interrupt priority register to its original
+ * value. */
+ portNVIC_SHPR2_REG = ulOriginalPriority;
+ }
+ #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
/* Make PendSV, CallSV and SysTick the same priority as the kernel. */
portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
@@ -1087,6 +1660,12 @@
/* Initialize the critical nesting count ready for the first task. */
ulCriticalNesting = 0;
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+ {
+ xSchedulerRunning = pdTRUE;
+ }
+ #endif
+
/* Start the first task. */
vStartFirstTask();
@@ -1122,7 +1701,6 @@
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;
@@ -1237,6 +1815,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
@@ -1259,3 +1885,159 @@
return xReturn;
}
/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ void vPortValidateInterruptPriority( void )
+ {
+ uint32_t ulCurrentInterrupt;
+ uint8_t ucCurrentPriority;
+
+ /* Obtain the number of the currently executing interrupt. */
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+ /* Is the interrupt number a user defined interrupt? */
+ if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+ {
+ /* Look up the interrupt's priority. */
+ ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+ /* The following assertion will fail if a service routine (ISR) for
+ * an interrupt that has been assigned a priority above
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+ * function. ISR safe FreeRTOS API functions must *only* be called
+ * from interrupts that have been assigned a priority at or below
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Numerically low interrupt priority numbers represent logically high
+ * interrupt priorities, therefore the priority of the interrupt must
+ * be set to a value equal to or numerically *higher* than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
+ * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+ * and therefore also guaranteed to be invalid.
+ *
+ * FreeRTOS maintains separate thread and ISR API functions to ensure
+ * interrupt entry is as fast and simple as possible.
+ *
+ * The following links provide detailed information:
+ * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+ * https://www.FreeRTOS.org/FAQHelp.html */
+ configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+ }
+
+ /* Priority grouping: The interrupt controller (NVIC) allows the bits
+ * that define each interrupt's priority to be split between bits that
+ * define the interrupt's pre-emption priority bits and bits that define
+ * the interrupt's sub-priority. For simplicity all bits must be defined
+ * to be pre-emption priority bits. The following assertion will fail if
+ * this is not the case (if some bits represent a sub-priority).
+ *
+ * If the application only uses CMSIS libraries for interrupt
+ * configuration then the correct setting can be achieved on all Cortex-M
+ * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+ * scheduler. Note however that some vendor specific peripheral libraries
+ * assume a non-zero priority group setting, in which cases using a value
+ * of zero will result in unpredictable behaviour. */
+ configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+ }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings;
+
+ if( xSchedulerRunning == pdFALSE )
+ {
+ /* Grant access to all the kernel objects before the scheduler
+ * is started. It is necessary because there is no task running
+ * yet and therefore, we cannot use the permissions of any
+ * task. */
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+
+ #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ ( void ) lInternalIndexOfKernelObject;
+
+ /* If Access Control List feature is not used, all the tasks have
+ * access to all the kernel objects. */
+ return pdTRUE;
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.h b/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.h
index 93606b1..f64ceb5 100644
--- a/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.h
+++ b/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s b/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s
index 9850588..cef6b8a 100644
--- a/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s
+++ b/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -32,22 +32,33 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
- EXTERN pxCurrentTCB
- EXTERN vTaskSwitchContext
- EXTERN vPortSVCHandler_C
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
- PUBLIC xIsPrivileged
- PUBLIC vResetPrivilege
- PUBLIC vRestoreContextOfFirstTask
- PUBLIC vRaisePrivilege
- PUBLIC vStartFirstTask
- PUBLIC ulSetInterruptMask
- PUBLIC vClearInterruptMask
- PUBLIC PendSV_Handler
- PUBLIC SVC_Handler
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+ EXTERN pxCurrentTCB
+ EXTERN vTaskSwitchContext
+ EXTERN vPortSVCHandler_C
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallExit
+#endif
+
+ PUBLIC xIsPrivileged
+ PUBLIC vResetPrivilege
+ PUBLIC vRestoreContextOfFirstTask
+ PUBLIC vRaisePrivilege
+ PUBLIC vStartFirstTask
+ PUBLIC ulSetInterruptMask
+ PUBLIC vClearInterruptMask
+ PUBLIC PendSV_Handler
+ PUBLIC SVC_Handler
#if ( configENABLE_FPU == 1 )
- #error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
+ #error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
#endif
/*-----------------------------------------------------------*/
@@ -55,256 +66,371 @@
/*-----------------------------------------------------------*/
- SECTION .text:CODE:NOROOT(2)
- THUMB
+ SECTION .text:CODE:NOROOT(2)
+ THUMB
/*-----------------------------------------------------------*/
xIsPrivileged:
- mrs r0, control /* r0 = CONTROL. */
- movs r1, #1 /* r1 = 1. */
- tst r0, r1 /* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
- beq running_privileged /* If the result of previous AND operation was 0, branch. */
- movs r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
- bx lr /* Return. */
- running_privileged:
- movs r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
- bx lr /* Return. */
+ mrs r0, control /* r0 = CONTROL. */
+ movs r1, #1 /* r1 = 1. */
+ tst r0, r1 /* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
+ beq running_privileged /* If the result of previous AND operation was 0, branch. */
+ movs r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ bx lr /* Return. */
+ running_privileged:
+ movs r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
vResetPrivilege:
- mrs r0, control /* r0 = CONTROL. */
- movs r1, #1 /* r1 = 1. */
- orrs r0, r1 /* r0 = r0 | r1. */
- msr control, r0 /* CONTROL = r0. */
- bx lr /* Return to the caller. */
+ mrs r0, control /* r0 = CONTROL. */
+ movs r1, #1 /* r1 = 1. */
+ orrs r0, r1 /* r0 = r0 | r1. */
+ msr control, r0 /* CONTROL = r0. */
+ bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
/*----------------- Privileged Functions --------------------*/
/*-----------------------------------------------------------*/
- SECTION privileged_functions:CODE:NOROOT(2)
- THUMB
+ SECTION privileged_functions:CODE:NOROOT(2)
+ THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
vRestoreContextOfFirstTask:
- ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r2] /* Read pxCurrentTCB. */
- ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- str r3, [r2] /* Disable MPU. */
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r2] /* Program RNR = 4. */
- ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */
- movs r4, #5 /* r4 = 5. */
- str r4, [r2] /* Program RNR = 5. */
- ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */
- movs r4, #6 /* r4 = 6. */
- str r4, [r2] /* Program RNR = 6. */
- ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */
- movs r4, #7 /* r4 = 7. */
- str r4, [r2] /* Program RNR = 7. */
- ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
- str r3, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- msr psplim, r1 /* Set this task's PSPLIM value. */
- msr control, r2 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- bx r3 /* Finally, branch to EXC_RETURN. */
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ subs r1, #16
+ ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ subs r1, #16
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+ mov lr, r5
+
+ restore_general_regs_first_task:
+ subs r1, #32
+ ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy half of the hardware saved context onto the task stack. */
+ ldmia r1!, {r4-r7} /* r4-r7 contain the remaining half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy the remaining half of the hardware saved context onto the task stack. */
+ subs r1, #48
+ ldmia r1!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r1, #32
+ ldmia r1!, {r4-r7} /* Restore r4-r7. */
+ subs r1, #16
+
+ restore_context_done_first_task:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
#else /* configENABLE_MPU */
- ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
- msr psplim, r1 /* Set this task's PSPLIM value. */
- movs r1, #2 /* r1 = 2. */
- msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- bx r2 /* Finally, branch to EXC_RETURN. */
+
+vRestoreContextOfFirstTask:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r2] /* Read pxCurrentTCB. */
+ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+
+ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ msr psplim, r1 /* Set this task's PSPLIM value. */
+ movs r1, #2 /* r1 = 2. */
+ msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
+ adds r0, #32 /* Discard everything up to r0. */
+ msr psp, r0 /* This is now the new top of stack to use in the task. */
+ isb
+ bx r2 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
vRaisePrivilege:
- mrs r0, control /* Read the CONTROL register. */
- movs r1, #1 /* r1 = 1. */
- bics r0, r1 /* Clear the bit 0. */
- msr control, r0 /* Write back the new CONTROL value. */
- bx lr /* Return to the caller. */
+ mrs r0, control /* Read the CONTROL register. */
+ movs r1, #1 /* r1 = 1. */
+ bics r0, r1 /* Clear the bit 0. */
+ msr control, r0 /* Write back the new CONTROL value. */
+ bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
vStartFirstTask:
- ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
- ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
- ldr r0, [r0] /* The first entry in vector table is stack pointer. */
- msr msp, r0 /* Set the MSP back to the start of the stack. */
- cpsie i /* Globally enable interrupts. */
- dsb
- isb
- svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
- nop
+ ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
+ ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
+ ldr r0, [r0] /* The first entry in vector table is stack pointer. */
+ msr msp, r0 /* Set the MSP back to the start of the stack. */
+ cpsie i /* Globally enable interrupts. */
+ dsb
+ isb
+ svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
+ nop
/*-----------------------------------------------------------*/
ulSetInterruptMask:
- mrs r0, PRIMASK
- cpsid i
- bx lr
+ mrs r0, PRIMASK
+ cpsid i
+ bx lr
/*-----------------------------------------------------------*/
vClearInterruptMask:
- msr PRIMASK, r0
- bx lr
+ msr PRIMASK, r0
+ bx lr
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
PendSV_Handler:
- mrs r0, psp /* Read PSP in r0. */
- ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r2] /* Read pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- subs r0, r0, #44 /* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- str r0, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r2, control /* r2 = CONTROL. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmia r0!, {r1-r7} /* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */
- mov r4, r8 /* r4 = r8. */
- mov r5, r9 /* r5 = r9. */
- mov r6, r10 /* r6 = r10. */
- mov r7, r11 /* r7 = r11. */
- stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */
+ mrs r2, psp /* r2 = PSP. */
+
+ save_general_regs:
+ stmia r1!, {r4-r7} /* Store r4-r7. */
+ mov r4, r8 /* r4 = r8. */
+ mov r5, r9 /* r5 = r9. */
+ mov r6, r10 /* r6 = r10. */
+ mov r7, r11 /* r7 = r11. */
+ stmia r1!, {r4-r7} /* Store r8-r11. */
+ ldmia r2!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */
+ stmia r1!, {r4-r7} /* Store the hardware saved context. */
+ ldmia r2!, {r4-r7} /* Copy the remaining half of the hardware saved context into r4-r7. */
+ stmia r1!, {r4-r7} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r2, psp /* r2 = PSP. */
+ mrs r3, psplim /* r3 = PSPLIM. */
+ mrs r4, control /* r4 = CONTROL. */
+ mov r5, lr /* r5 = LR. */
+ stmia r1!, {r2-r5} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ cpsid i
+ bl vTaskSwitchContext
+ cpsie i
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ subs r1, #16
+ ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ subs r1, #16
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+ mov lr, r5
+
+ restore_general_regs:
+ subs r1, #32
+ ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy half of the hardware saved context onto the task stack. */
+ ldmia r1!, {r4-r7} /* r4-r7 contain the remaining half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy the remaining half of the hardware saved context onto the task stack. */
+ subs r1, #48
+ ldmia r1!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r1, #32
+ ldmia r1!, {r4-r7} /* Restore r4-r7. */
+ subs r1, #16
+
+ restore_context_done:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
#else /* configENABLE_MPU */
- subs r0, r0, #40 /* Make space for PSPLIM, LR and the remaining registers on the stack. */
- str r0, [r1] /* Save the new top of stack in TCB. */
- mrs r2, psplim /* r2 = PSPLIM. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmia r0!, {r2-r7} /* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
- mov r4, r8 /* r4 = r8. */
- mov r5, r9 /* r5 = r9. */
- mov r6, r10 /* r6 = r10. */
- mov r7, r11 /* r7 = r11. */
- stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */
-#endif /* configENABLE_MPU */
- cpsid i
- bl vTaskSwitchContext
- cpsie i
+PendSV_Handler:
+ mrs r0, psp /* Read PSP in r0. */
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r2] /* Read pxCurrentTCB. */
- ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r2] /* Read pxCurrentTCB. */
- ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
+ subs r0, r0, #40 /* Make space for PSPLIM, LR and the remaining registers on the stack. */
+ str r0, [r1] /* Save the new top of stack in TCB. */
+ mrs r2, psplim /* r2 = PSPLIM. */
+ mov r3, lr /* r3 = LR/EXC_RETURN. */
+ stmia r0!, {r2-r7} /* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
+ mov r4, r8 /* r4 = r8. */
+ mov r5, r9 /* r5 = r9. */
+ mov r6, r10 /* r6 = r10. */
+ mov r7, r11 /* r7 = r11. */
+ stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- str r3, [r2] /* Disable MPU. */
+ cpsid i
+ bl vTaskSwitchContext
+ cpsie i
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r2] /* Program RNR = 4. */
- ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */
- movs r4, #5 /* r4 = 5. */
- str r4, [r2] /* Program RNR = 5. */
- ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */
- movs r4, #6 /* r4 = 6. */
- str r4, [r2] /* Program RNR = 6. */
- ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */
- movs r4, #7 /* r4 = 7. */
- str r4, [r2] /* Program RNR = 7. */
- ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r2] /* Read pxCurrentTCB. */
+ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
- str r3, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
+ adds r0, r0, #24 /* Move to the high registers. */
+ ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ msr psp, r0 /* Remember the new top of stack for the task. */
+ subs r0, r0, #40 /* Move to the starting of the saved context. */
+ ldmia r0!, {r2-r7} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
+ msr psplim, r2 /* Restore the PSPLIM register value for the task. */
+ bx r3
-#if ( configENABLE_MPU == 1 )
- adds r0, r0, #28 /* Move to the high registers. */
- ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */
- mov r8, r4 /* r8 = r4. */
- mov r9, r5 /* r9 = r5. */
- mov r10, r6 /* r10 = r6. */
- mov r11, r7 /* r11 = r7. */
- msr psp, r0 /* Remember the new top of stack for the task. */
- subs r0, r0, #44 /* Move to the starting of the saved context. */
- ldmia r0!, {r1-r7} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r2 /* Restore the CONTROL register value for the task. */
- bx r3
-#else /* configENABLE_MPU */
- adds r0, r0, #24 /* Move to the high registers. */
- ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */
- mov r8, r4 /* r8 = r4. */
- mov r9, r5 /* r9 = r5. */
- mov r10, r6 /* r10 = r6. */
- mov r11, r7 /* r11 = r7. */
- msr psp, r0 /* Remember the new top of stack for the task. */
- subs r0, r0, #40 /* Move to the starting of the saved context. */
- ldmia r0!, {r2-r7} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
- msr psplim, r2 /* Restore the PSPLIM register value for the task. */
- bx r3
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
SVC_Handler:
- movs r0, #4
- mov r1, lr
- tst r0, r1
- beq stacking_used_msp
- mrs r0, psp
- b vPortSVCHandler_C
- stacking_used_msp:
- mrs r0, msp
- b vPortSVCHandler_C
+ movs r0, #4
+ mov r1, lr
+ tst r0, r1
+ beq stack_on_msp
+ stack_on_psp:
+ mrs r0, psp
+ b route_svc
+ stack_on_msp:
+ mrs r0, msp
+ b route_svc
+
+ route_svc:
+ ldr r3, [r0, #24]
+ subs r3, #2
+ ldrb r2, [r3, #0]
+ cmp r2, #NUM_SYSTEM_CALLS
+ blt system_call_enter
+ cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq system_call_exit
+ b vPortSVCHandler_C
+
+ system_call_enter:
+ b vSystemCallEnter
+ system_call_exit:
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+SVC_Handler:
+ movs r0, #4
+ mov r1, lr
+ tst r0, r1
+ beq stacking_used_msp
+ mrs r0, psp
+ b vPortSVCHandler_C
+ stacking_used_msp:
+ mrs r0, msp
+ b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
- END
+ END
diff --git a/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h b/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h
index f31bd3f..19d7556 100644
--- a/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h
+++ b/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -29,11 +29,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
+/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
-
-#include "portmacrocommon.h"
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -48,11 +48,16 @@
/**
* Architecture specifics.
*/
-#define portARCH_NAME "Cortex-M23"
-#define portDONT_DISCARD __root
+#define portARCH_NAME "Cortex-M23"
+#define portHAS_BASEPRI 0
+#define portDONT_DISCARD __root
/*-----------------------------------------------------------*/
-#if( configTOTAL_MPU_REGIONS == 16 )
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
+#if ( configTOTAL_MPU_REGIONS == 16 )
#error 16 MPU regions are not yet supported for this port.
#endif
/*-----------------------------------------------------------*/
@@ -60,8 +65,8 @@
/**
* @brief Critical section management.
*/
-#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
-#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
+#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
+#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
/*-----------------------------------------------------------*/
/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in
@@ -71,8 +76,10 @@
#pragma diag_suppress=Pa082
/*-----------------------------------------------------------*/
+/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h
index e68692a..6f666da 100644
--- a/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h
+++ b/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -27,11 +27,13 @@
*/
#ifndef PORTMACROCOMMON_H
- #define PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -43,209 +45,329 @@
*------------------------------------------------------------------------------
*/
- #ifndef configENABLE_FPU
- #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
- #endif /* configENABLE_FPU */
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
- #ifndef configENABLE_MPU
- #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
- #endif /* configENABLE_MPU */
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
- #ifndef configENABLE_TRUSTZONE
- #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
- #endif /* configENABLE_TRUSTZONE */
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
- #define portCHAR char
- #define portFLOAT float
- #define portDOUBLE double
- #define portLONG long
- #define portSHORT short
- #define portSTACK_TYPE uint32_t
- #define portBASE_TYPE long
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
- typedef portSTACK_TYPE StackType_t;
- typedef long BaseType_t;
- typedef unsigned long UBaseType_t;
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
- typedef uint16_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffff
- #else
- typedef uint32_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
- #define portTICK_TYPE_IS_ATOMIC 1
- #endif
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
- #define portSTACK_GROWTH ( -1 )
- #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
- #define portBYTE_ALIGNMENT 8
- #define portNOP()
- #define portINLINE __inline
- #ifndef portFORCE_INLINE
- #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
- #endif
- #define portHAS_STACK_OVERFLOW_CHECKING 1
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
* @brief Extern declarations.
*/
- extern BaseType_t xPortIsInsideInterrupt( void );
+extern BaseType_t xPortIsInsideInterrupt( void );
- extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- #if ( configENABLE_TRUSTZONE == 1 )
- extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
- extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
- #endif /* configENABLE_TRUSTZONE */
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
- #if ( configENABLE_MPU == 1 )
- extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
- extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/**
* @brief MPU specific constants.
*/
- #if ( configENABLE_MPU == 1 )
- #define portUSING_MPU_WRAPPERS 1
- #define portPRIVILEGE_BIT ( 0x80000000UL )
- #else
- #define portPRIVILEGE_BIT ( 0x0UL )
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
/* MPU settings that can be overriden in FreeRTOSConfig.h. */
#ifndef configTOTAL_MPU_REGIONS
/* Define to 8 for backward compatibility. */
- #define configTOTAL_MPU_REGIONS ( 8UL )
+ #define configTOTAL_MPU_REGIONS ( 8UL )
#endif
/* MPU regions. */
- #define portPRIVILEGED_FLASH_REGION ( 0UL )
- #define portUNPRIVILEGED_FLASH_REGION ( 1UL )
- #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
- #define portPRIVILEGED_RAM_REGION ( 3UL )
- #define portSTACK_REGION ( 4UL )
- #define portFIRST_CONFIGURABLE_REGION ( 5UL )
- #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
- #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
- #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
- * 01 --> Device-nGnRE
- * 10 --> Device-nGRE
- * 11 --> Device-GRE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
- #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
- #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
- #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
- #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
- #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
- #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
/* Attributes used in MPU_RBAR registers. */
- #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
- #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
- #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
- #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
- #define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
- #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
- #define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
- #define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
typedef struct MPURegionSettings
{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
-/**
- * @brief MPU settings as stored in the TCB.
- */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
} xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
* @brief SVC numbers.
*/
- #define portSVC_ALLOCATE_SECURE_CONTEXT 0
- #define portSVC_FREE_SECURE_CONTEXT 1
- #define portSVC_START_SCHEDULER 2
- #define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
- #define portYIELD() vPortYield()
- #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
- #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
- #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 )
- #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/**
* @brief Critical section management.
*/
- #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
- #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
- #define portENTER_CRITICAL() vPortEnterCritical()
- #define portEXIT_CRITICAL() vPortExitCritical()
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/**
* @brief Tickless idle/low power functionality.
*/
- #ifndef portSUPPRESS_TICKS_AND_SLEEP
- extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
- #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
- #endif
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
/*-----------------------------------------------------------*/
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
- #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/*-----------------------------------------------------------*/
- #if ( configENABLE_TRUSTZONE == 1 )
+#if ( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
@@ -256,7 +378,7 @@
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
- #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
@@ -264,18 +386,18 @@
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
- #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
- #endif /* configENABLE_TRUSTZONE */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
- #if ( configENABLE_MPU == 1 )
+#if ( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
- #define portIS_PRIVILEGED() xIsPrivileged()
+ #define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
@@ -284,28 +406,44 @@
* then it raises the privilege. If this is called from any other place,
* the privilege is not raised.
*/
- #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting the bit 0 of the CONTROL
* register.
*/
- #define portRESET_PRIVILEGE() vResetPrivilege()
- #else
- #define portIS_PRIVILEGED()
- #define portRAISE_PRIVILEGE()
- #define portRESET_PRIVILEGE()
- #endif /* configENABLE_MPU */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
* @brief Barriers.
*/
- #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
/*-----------------------------------------------------------*/
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/IAR/ARM_CM3/port.c b/Source/portable/IAR/ARM_CM3/port.c
index 17eaa63..10ce863 100644
--- a/Source/portable/IAR/ARM_CM3/port.c
+++ b/Source/portable/IAR/ARM_CM3/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -55,8 +55,9 @@
#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
/* Constants required to check the validity of an interrupt priority. */
#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
@@ -86,13 +87,6 @@
* have bit-0 clear, as it is loaded into the PC on exit from an ISR. */
#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL )
-/* For backward compatibility, ensure configKERNEL_INTERRUPT_PRIORITY is
- * defined. The value 255 should also ensure backward compatibility.
- * FreeRTOS.org versions prior to V4.3.0 did not include this definition. */
-#ifndef configKERNEL_INTERRUPT_PRIORITY
- #define configKERNEL_INTERRUPT_PRIORITY 255
-#endif
-
/* Let the user override the default SysTick clock rate. If defined by the
* user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
* configuration register. */
@@ -214,13 +208,10 @@
*/
BaseType_t xPortStartScheduler( void )
{
- /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0.
- * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
- configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY );
-
#if ( configASSERT_DEFINED == 1 )
{
- volatile uint32_t ulOriginalPriority;
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
volatile uint8_t ucMaxPriorityValue;
@@ -230,7 +221,7 @@
* ensure interrupt entry is as fast and simple as possible.
*
* Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
/* Determine the number of priority bits available. First write to all
* possible bits. */
@@ -242,33 +233,53 @@
/* Use the same mask on the maximum system call priority. */
ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
/* Calculate the maximum acceptable priority group value for the number
* of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- ulMaxPRIGROUPValue--;
+ ulImplementedPrioBits++;
ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
- #ifdef __NVIC_PRIO_BITS
+ if( ulImplementedPrioBits == 8 )
{
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
}
- #endif
-
- #ifdef configPRIO_BITS
+ else
{
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
}
- #endif
/* Shift the priority group value back to its position within the AIRCR
* register. */
@@ -277,7 +288,7 @@
/* Restore the clobbered interrupt priority register to its original
* value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
}
#endif /* configASSERT_DEFINED */
@@ -631,10 +642,10 @@
* be set to a value equal to or numerically *higher* than
* configMAX_SYSCALL_INTERRUPT_PRIORITY.
*
- * Interrupts that use the FreeRTOS API must not be left at their
- * default priority of zero as that is the highest possible priority,
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
* which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
- * and therefore also guaranteed to be invalid.
+ * and therefore also guaranteed to be invalid.
*
* FreeRTOS maintains separate thread and ISR API functions to ensure
* interrupt entry is as fast and simple as possible.
diff --git a/Source/portable/IAR/ARM_CM3/portasm.s b/Source/portable/IAR/ARM_CM3/portasm.s
index b53183e..dfaabc3 100644
--- a/Source/portable/IAR/ARM_CM3/portasm.s
+++ b/Source/portable/IAR/ARM_CM3/portasm.s
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -28,77 +28,77 @@
#include <FreeRTOSConfig.h>
- RSEG CODE:CODE(2)
- thumb
+ RSEG CODE:CODE(2)
+ thumb
- EXTERN pxCurrentTCB
- EXTERN vTaskSwitchContext
+ EXTERN pxCurrentTCB
+ EXTERN vTaskSwitchContext
- PUBLIC xPortPendSVHandler
- PUBLIC vPortSVCHandler
- PUBLIC vPortStartFirstTask
+ PUBLIC xPortPendSVHandler
+ PUBLIC vPortSVCHandler
+ PUBLIC vPortStartFirstTask
/*-----------------------------------------------------------*/
xPortPendSVHandler:
- mrs r0, psp
- isb
- ldr r3, =pxCurrentTCB /* Get the location of the current TCB. */
- ldr r2, [r3]
+ mrs r0, psp
+ isb
+ ldr r3, =pxCurrentTCB /* Get the location of the current TCB. */
+ ldr r2, [r3]
- stmdb r0!, {r4-r11} /* Save the remaining registers. */
- str r0, [r2] /* Save the new top of stack into the first member of the TCB. */
+ stmdb r0!, {r4-r11} /* Save the remaining registers. */
+ str r0, [r2] /* Save the new top of stack into the first member of the TCB. */
- stmdb sp!, {r3, r14}
- mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
- msr basepri, r0
- dsb
- isb
- bl vTaskSwitchContext
- mov r0, #0
- msr basepri, r0
- ldmia sp!, {r3, r14}
+ stmdb sp!, {r3, r14}
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0
+ msr basepri, r0
+ ldmia sp!, {r3, r14}
- ldr r1, [r3]
- ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. */
- ldmia r0!, {r4-r11} /* Pop the registers. */
- msr psp, r0
- isb
- bx r14
+ ldr r1, [r3]
+ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. */
+ ldmia r0!, {r4-r11} /* Pop the registers. */
+ msr psp, r0
+ isb
+ bx r14
/*-----------------------------------------------------------*/
vPortSVCHandler:
- /* Get the location of the current TCB. */
- ldr r3, =pxCurrentTCB
- ldr r1, [r3]
- ldr r0, [r1]
- /* Pop the core registers. */
- ldmia r0!, {r4-r11}
- msr psp, r0
- isb
- mov r0, #0
- msr basepri, r0
- orr r14, r14, #13
- bx r14
+ /* Get the location of the current TCB. */
+ ldr r3, =pxCurrentTCB
+ ldr r1, [r3]
+ ldr r0, [r1]
+ /* Pop the core registers. */
+ ldmia r0!, {r4-r11}
+ msr psp, r0
+ isb
+ mov r0, #0
+ msr basepri, r0
+ orr r14, r14, #13
+ bx r14
/*-----------------------------------------------------------*/
vPortStartFirstTask
- /* Use the NVIC offset register to locate the stack. */
- ldr r0, =0xE000ED08
- ldr r0, [r0]
- ldr r0, [r0]
- /* Set the msp back to the start of the stack. */
- msr msp, r0
- /* Call SVC to start the first task, ensuring interrupts are enabled. */
- cpsie i
- cpsie f
- dsb
- isb
- svc 0
+ /* Use the NVIC offset register to locate the stack. */
+ ldr r0, =0xE000ED08
+ ldr r0, [r0]
+ ldr r0, [r0]
+ /* Set the msp back to the start of the stack. */
+ msr msp, r0
+ /* Call SVC to start the first task, ensuring interrupts are enabled. */
+ cpsie i
+ cpsie f
+ dsb
+ isb
+ svc 0
- END
+ END
diff --git a/Source/portable/IAR/ARM_CM3/portmacro.h b/Source/portable/IAR/ARM_CM3/portmacro.h
index 9e4ada4..3e67345 100644
--- a/Source/portable/IAR/ARM_CM3/portmacro.h
+++ b/Source/portable/IAR/ARM_CM3/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -30,9 +30,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*-----------------------------------------------------------
* Port specific definitions.
@@ -60,16 +62,18 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
+ #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
- #else
+ #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+ #else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
@@ -201,8 +205,10 @@
#pragma diag_suppress=Pe191
#pragma diag_suppress=Pa082
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S b/Source/portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..ef180bd
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1336 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+#include "mpu_syscall_numbers.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskDelayUntil
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskAbortDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskPriorityGet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_eTaskGetState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskGetInfo
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSuspend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskResume
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetTickCount
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTaskGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetSystemState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetSchedulerState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetTimeOutState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskCheckForTimeOut
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyEntry
+MPU_xTaskGenericNotifyEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotify
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWaitEntry
+MPU_xTaskGenericNotifyWaitEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyWait
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGenericSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueMessagesWaiting
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueuePeek
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSemaphoreTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGetMutexHolder
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueTakeMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGiveMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSelectFromSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueAddToSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueAddToRegistry
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueUnregisterQueue
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcQueueGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTimerGetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerIsTimerActive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommandEntry
+MPU_xTimerGenericCommandEntry:
+ push {r0}
+ /* This function can be called from ISR also and therefore, we need a check
+ * to take privileged path, if called from ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGenericCommand
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandPrivImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTimerGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetPeriod
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetExpiryTime
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBitsEntry
+MPU_xEventGroupWaitBitsEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupWaitBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupClearBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSetBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSync
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxEventGroupGetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vEventGroupSetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsFull
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsEmpty
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferBytesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandPrivImpl
+MPU_xTimerGenericCommandPrivImpl:
+ b MPU_xTimerGenericCommandPrivImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/Source/portable/IAR/ARM_CM33/non_secure/port.c b/Source/portable/IAR/ARM_CM33/non_secure/port.c
index 349aeff..9712ac3 100644
--- a/Source/portable/IAR/ARM_CM33/non_secure/port.c
+++ b/Source/portable/IAR/ARM_CM33/non_secure/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
-/* MPU wrappers includes. */
+/* MPU includes. */
#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@@ -95,6 +96,26 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -111,6 +132,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -135,6 +164,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -178,6 +209,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -299,6 +354,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -352,8 +420,67 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
/**
* @brief Each task maintains its own interrupt status in the critical nesting
* variable.
@@ -369,6 +496,19 @@
PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
#endif /* configENABLE_TRUSTZONE */
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
#if ( configUSE_TICKLESS_IDLE == 1 )
/**
@@ -656,10 +796,29 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@@ -827,9 +986,8 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@@ -839,7 +997,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -854,7 +1012,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -925,18 +1083,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -944,131 +1102,546 @@
}
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Store the value of the PSPLIM register before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the privileged code (i.e. application is not
+ * raising SVC directly). This SVC is only raised from
+ * vRequestSystemCallExit which is in the privileged code section.
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+ * this means that we previously entered a system call and the
+ * application is not attempting to exit without entering a system
+ * call.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+ {
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Return to the caller of the System Call entry point (i.e. the
+ * caller of the MPU_<API>). */
+ pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ /* Ensure that LR has a valid value.*/
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* Restore the PSPLIM register to what it was at the time of
+ * system call entry. */
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
+
+ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+ }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+ }
+
+#else /* configENABLE_MPU */
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
+ {
+ /* Simulate the stack frame as it would be created by a context switch
+ * interrupt. */
+ #if ( portPRELOAD_REGISTERS == 0 )
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
+ *pxTopOfStack = portINITIAL_EXC_RETURN;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #else /* portPRELOAD_REGISTERS */
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+ pxTopOfStack--;
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #endif /* portPRELOAD_REGISTERS */
+
+ return pxTopOfStack;
+ }
+
#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
-{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- #if ( portPRELOAD_REGISTERS == 0 )
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #else /* portPRELOAD_REGISTERS */
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #endif /* portPRELOAD_REGISTERS */
-
- return pxTopOfStack;
-}
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
{
+ #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+ {
+ volatile uint32_t ulOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine the maximum priority from which ISR safe FreeRTOS API
+ * functions can be called. ISR safe functions are those that end in
+ * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+ * ensure interrupt entry is as fast and simple as possible.
+ *
+ * Save the interrupt priority value that is about to be clobbered. */
+ ulOriginalPriority = portNVIC_SHPR2_REG;
+
+ /* Determine the number of priority bits available. First write to all
+ * possible bits. */
+ portNVIC_SHPR2_REG = 0xFF000000;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+ /* Use the same mask on the maximum system call priority. */
+ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+ /* Calculate the maximum acceptable priority group value for the number
+ * of bits read back. */
+
+ while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+ {
+ ulImplementedPrioBits++;
+ ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+ }
+
+ if( ulImplementedPrioBits == 8 )
+ {
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
+ }
+ else
+ {
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+ }
+
+ /* Shift the priority group value back to its position within the AIRCR
+ * register. */
+ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+ ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+ /* Restore the clobbered interrupt priority register to its original
+ * value. */
+ portNVIC_SHPR2_REG = ulOriginalPriority;
+ }
+ #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
/* Make PendSV, CallSV and SysTick the same priority as the kernel. */
portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
@@ -1087,6 +1660,12 @@
/* Initialize the critical nesting count ready for the first task. */
ulCriticalNesting = 0;
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+ {
+ xSchedulerRunning = pdTRUE;
+ }
+ #endif
+
/* Start the first task. */
vStartFirstTask();
@@ -1122,7 +1701,6 @@
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;
@@ -1237,6 +1815,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
@@ -1259,3 +1885,159 @@
return xReturn;
}
/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ void vPortValidateInterruptPriority( void )
+ {
+ uint32_t ulCurrentInterrupt;
+ uint8_t ucCurrentPriority;
+
+ /* Obtain the number of the currently executing interrupt. */
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+ /* Is the interrupt number a user defined interrupt? */
+ if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+ {
+ /* Look up the interrupt's priority. */
+ ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+ /* The following assertion will fail if a service routine (ISR) for
+ * an interrupt that has been assigned a priority above
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+ * function. ISR safe FreeRTOS API functions must *only* be called
+ * from interrupts that have been assigned a priority at or below
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Numerically low interrupt priority numbers represent logically high
+ * interrupt priorities, therefore the priority of the interrupt must
+ * be set to a value equal to or numerically *higher* than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
+ * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+ * and therefore also guaranteed to be invalid.
+ *
+ * FreeRTOS maintains separate thread and ISR API functions to ensure
+ * interrupt entry is as fast and simple as possible.
+ *
+ * The following links provide detailed information:
+ * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+ * https://www.FreeRTOS.org/FAQHelp.html */
+ configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+ }
+
+ /* Priority grouping: The interrupt controller (NVIC) allows the bits
+ * that define each interrupt's priority to be split between bits that
+ * define the interrupt's pre-emption priority bits and bits that define
+ * the interrupt's sub-priority. For simplicity all bits must be defined
+ * to be pre-emption priority bits. The following assertion will fail if
+ * this is not the case (if some bits represent a sub-priority).
+ *
+ * If the application only uses CMSIS libraries for interrupt
+ * configuration then the correct setting can be achieved on all Cortex-M
+ * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+ * scheduler. Note however that some vendor specific peripheral libraries
+ * assume a non-zero priority group setting, in which cases using a value
+ * of zero will result in unpredictable behaviour. */
+ configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+ }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings;
+
+ if( xSchedulerRunning == pdFALSE )
+ {
+ /* Grant access to all the kernel objects before the scheduler
+ * is started. It is necessary because there is no task running
+ * yet and therefore, we cannot use the permissions of any
+ * task. */
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+
+ #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ ( void ) lInternalIndexOfKernelObject;
+
+ /* If Access Control List feature is not used, all the tasks have
+ * access to all the kernel objects. */
+ return pdTRUE;
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM33/non_secure/portasm.h b/Source/portable/IAR/ARM_CM33/non_secure/portasm.h
index 93606b1..f64ceb5 100644
--- a/Source/portable/IAR/ARM_CM33/non_secure/portasm.h
+++ b/Source/portable/IAR/ARM_CM33/non_secure/portasm.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM33/non_secure/portasm.s b/Source/portable/IAR/ARM_CM33/non_secure/portasm.s
index 2f0fb7e..5309103 100644
--- a/Source/portable/IAR/ARM_CM33/non_secure/portasm.s
+++ b/Source/portable/IAR/ARM_CM33/non_secure/portasm.s
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -32,322 +32,465 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
- EXTERN pxCurrentTCB
- EXTERN xSecureContext
- EXTERN vTaskSwitchContext
- EXTERN vPortSVCHandler_C
- EXTERN SecureContext_SaveContext
- EXTERN SecureContext_LoadContext
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
- PUBLIC xIsPrivileged
- PUBLIC vResetPrivilege
- PUBLIC vPortAllocateSecureContext
- PUBLIC vRestoreContextOfFirstTask
- PUBLIC vRaisePrivilege
- PUBLIC vStartFirstTask
- PUBLIC ulSetInterruptMask
- PUBLIC vClearInterruptMask
- PUBLIC PendSV_Handler
- PUBLIC SVC_Handler
- PUBLIC vPortFreeSecureContext
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+ EXTERN pxCurrentTCB
+ EXTERN xSecureContext
+ EXTERN vTaskSwitchContext
+ EXTERN vPortSVCHandler_C
+ EXTERN SecureContext_SaveContext
+ EXTERN SecureContext_LoadContext
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallExit
+#endif
+
+ PUBLIC xIsPrivileged
+ PUBLIC vResetPrivilege
+ PUBLIC vPortAllocateSecureContext
+ PUBLIC vRestoreContextOfFirstTask
+ PUBLIC vRaisePrivilege
+ PUBLIC vStartFirstTask
+ PUBLIC ulSetInterruptMask
+ PUBLIC vClearInterruptMask
+ PUBLIC PendSV_Handler
+ PUBLIC SVC_Handler
+ PUBLIC vPortFreeSecureContext
/*-----------------------------------------------------------*/
/*---------------- Unprivileged Functions -------------------*/
/*-----------------------------------------------------------*/
- SECTION .text:CODE:NOROOT(2)
- THUMB
+ SECTION .text:CODE:NOROOT(2)
+ THUMB
/*-----------------------------------------------------------*/
xIsPrivileged:
- mrs r0, control /* r0 = CONTROL. */
- tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
- ite ne
- movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
- moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is not privileged. */
- bx lr /* Return. */
+ mrs r0, control /* r0 = CONTROL. */
+ tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+ ite ne
+ movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is not privileged. */
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
vResetPrivilege:
- mrs r0, control /* r0 = CONTROL. */
- orr r0, r0, #1 /* r0 = r0 | 1. */
- msr control, r0 /* CONTROL = r0. */
- bx lr /* Return to the caller. */
+ mrs r0, control /* r0 = CONTROL. */
+ orr r0, r0, #1 /* r0 = r0 | 1. */
+ msr control, r0 /* CONTROL = r0. */
+ bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
vPortAllocateSecureContext:
- svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */
- bx lr /* Return. */
+ svc 100 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 100. */
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
/*----------------- Privileged Functions --------------------*/
/*-----------------------------------------------------------*/
- SECTION privileged_functions:CODE:NOROOT(2)
- THUMB
+ SECTION privileged_functions:CODE:NOROOT(2)
+ THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
vRestoreContextOfFirstTask:
- ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r3, [r2] /* Read pxCurrentTCB. */
- ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
- adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r2] /* Program RNR = 4. */
- adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- ldr r5, =xSecureContext
- str r1, [r5] /* Set xSecureContext to this task's value for the same. */
- msr psplim, r2 /* Set this task's PSPLIM value. */
- msr control, r3 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r4 /* Finally, branch to EXC_RETURN. */
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+
+ restore_general_regs_first_task:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
#else /* configENABLE_MPU */
- ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
- ldr r4, =xSecureContext
- str r1, [r4] /* Set xSecureContext to this task's value for the same. */
- msr psplim, r2 /* Set this task's PSPLIM value. */
- movs r1, #2 /* r1 = 2. */
- msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r3 /* Finally, branch to EXC_RETURN. */
+
+vRestoreContextOfFirstTask:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r3, [r2] /* Read pxCurrentTCB. */
+ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+
+ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ ldr r4, =xSecureContext
+ str r1, [r4] /* Set xSecureContext to this task's value for the same. */
+ msr psplim, r2 /* Set this task's PSPLIM value. */
+ movs r1, #2 /* r1 = 2. */
+ msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
+ adds r0, #32 /* Discard everything up to r0. */
+ msr psp, r0 /* This is now the new top of stack to use in the task. */
+ isb
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx r3 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
vRaisePrivilege:
- mrs r0, control /* Read the CONTROL register. */
- bic r0, r0, #1 /* Clear the bit 0. */
- msr control, r0 /* Write back the new CONTROL value. */
- bx lr /* Return to the caller. */
+ mrs r0, control /* Read the CONTROL register. */
+ bic r0, r0, #1 /* Clear the bit 0. */
+ msr control, r0 /* Write back the new CONTROL value. */
+ bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
vStartFirstTask:
- ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
- ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
- ldr r0, [r0] /* The first entry in vector table is stack pointer. */
- msr msp, r0 /* Set the MSP back to the start of the stack. */
- cpsie i /* Globally enable interrupts. */
- cpsie f
- dsb
- isb
- svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
+ ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
+ ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
+ ldr r0, [r0] /* The first entry in vector table is stack pointer. */
+ msr msp, r0 /* Set the MSP back to the start of the stack. */
+ cpsie i /* Globally enable interrupts. */
+ cpsie f
+ dsb
+ isb
+ svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
/*-----------------------------------------------------------*/
ulSetInterruptMask:
- mrs r0, basepri /* r0 = basepri. Return original basepri value. */
- mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY
- msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
- dsb
- isb
- bx lr /* Return. */
+ mrs r0, basepri /* r0 = basepri. Return original basepri value. */
+ mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
vClearInterruptMask:
- msr basepri, r0 /* basepri = ulMask. */
- dsb
- isb
- bx lr /* Return. */
+ msr basepri, r0 /* basepri = ulMask. */
+ dsb
+ isb
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
PendSV_Handler:
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
- mrs r2, psp /* Read PSP in r2. */
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */
- cbz r0, save_ns_context /* No secure context to save. */
- push {r0-r2, r14}
- bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r0-r3} /* LR is now in r3. */
- mov lr, r3 /* LR = r3. */
- lsls r1, r3, #25 /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ cbz r0, save_ns_context /* No secure context to save. */
+ save_s_context:
+ push {r0-r2, lr}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r2, lr}
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
+ save_ns_context:
+ mov r3, lr /* r3 = LR (EXC_RETURN). */
+ lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ save_general_regs:
+ mrs r3, psp
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r2!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */
+ sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r2!, {r4-r11} /* Store r4-r11. */
+ ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r2!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psp /* r3 = PSP. */
+ mrs r4, psplim /* r4 = PSPLIM. */
+ mrs r5, control /* r5 = CONTROL. */
+ stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+ cbz r0, restore_ns_context /* No secure context to restore. */
+
+ restore_s_context:
+ push {r1-r3, lr}
+ bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r1-r3, lr}
+
+ restore_ns_context:
+ mov r0, lr /* r0 = LR (EXC_RETURN). */
+ lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ restore_general_regs:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
#else /* configENABLE_MPU */
- subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
+
+PendSV_Handler:
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ mrs r2, psp /* Read PSP in r2. */
+
+ cbz r0, save_ns_context /* No secure context to save. */
+ push {r0-r2, r14}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r3} /* LR is now in r3. */
+ mov lr, r3 /* LR = r3. */
+ lsls r1, r3, #25 /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ str r2, [r1] /* Save the new top of stack in TCB. */
+ mrs r1, psplim /* r1 = PSPLIM. */
+ mov r3, lr /* r3 = LR/EXC_RETURN. */
+ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
+ b select_next_task
+
+ save_ns_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ it eq
+ vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ str r2, [r1] /* Save the new top of stack in TCB. */
+ adds r2, r2, #12 /* r2 = r2 + 12. */
+ stm r2, {r4-r11} /* Store the registers that are not saved automatically. */
+ mrs r1, psplim /* r1 = PSPLIM. */
+ mov r3, lr /* r3 = LR/EXC_RETURN. */
+ subs r2, r2, #12 /* r2 = r2 - 12. */
+ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
+
+ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ msr psplim, r1 /* Restore the PSPLIM register value for the task. */
+ mov lr, r4 /* LR = r4. */
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r3] /* Restore the task's xSecureContext. */
+ cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ push {r2, r4}
+ bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r2, r4}
+ mov lr, r4 /* LR = r4. */
+ lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ msr psp, r2 /* Remember the new top of stack for the task. */
+ bx lr
+
+ restore_ns_context:
+ ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ it eq
+ vldmiaeq r2!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ msr psp, r2 /* Remember the new top of stack for the task. */
+ bx lr
+
#endif /* configENABLE_MPU */
- b select_next_task
-
- save_ns_context:
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
- tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
- it eq
- vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
- #endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- adds r2, r2, #16 /* r2 = r2 + 16. */
- stm r2, {r4-r11} /* Store the registers that are not saved automatically. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- subs r2, r2, #16 /* r2 = r2 - 16. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- adds r2, r2, #12 /* r2 = r2 + 12. */
- stm r2, {r4-r11} /* Store the registers that are not saved automatically. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- subs r2, r2, #12 /* r2 = r2 - 12. */
- stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
-
- select_next_task:
- mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
- msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
- dsb
- isb
- bl vTaskSwitchContext
- mov r0, #0 /* r0 = 0. */
- msr basepri, r0 /* Enable interrupts. */
-
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
-
- #if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r3] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r3] /* Program MAIR0. */
- ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r3] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r3] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
-
- #if ( configENABLE_MPU == 1 )
- ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r3 /* Restore the CONTROL register value for the task. */
- mov lr, r4 /* LR = r4. */
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- str r0, [r3] /* Restore the task's xSecureContext. */
- cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- push {r2, r4}
- bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r2, r4}
- mov lr, r4 /* LR = r4. */
- lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- msr psp, r2 /* Remember the new top of stack for the task. */
- bx lr
- #else /* configENABLE_MPU */
- ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- mov lr, r4 /* LR = r4. */
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- str r0, [r3] /* Restore the task's xSecureContext. */
- cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- push {r2, r4}
- bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r2, r4}
- mov lr, r4 /* LR = r4. */
- lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- msr psp, r2 /* Remember the new top of stack for the task. */
- bx lr
- #endif /* configENABLE_MPU */
-
- restore_ns_context:
- ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */
- #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
- tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
- it eq
- vldmiaeq r2!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
- #endif /* configENABLE_FPU || configENABLE_MVE */
- msr psp, r2 /* Remember the new top of stack for the task. */
- bx lr
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
SVC_Handler:
- tst lr, #4
- ite eq
- mrseq r0, msp
- mrsne r0, psp
- b vPortSVCHandler_C
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #NUM_SYSTEM_CALLS
+ blt syscall_enter
+ cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+ b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
vPortFreeSecureContext:
- /* r0 = uint32_t *pulTCB. */
- ldr r2, [r0] /* The first item in the TCB is the top of the stack. */
- ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */
- cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */
- it ne
- svcne 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */
- bx lr /* Return. */
+ /* r0 = uint32_t *pulTCB. */
+ ldr r2, [r0] /* The first item in the TCB is the top of the stack. */
+ ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */
+ cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */
+ it ne
+ svcne 101 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 101. */
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
- END
+ END
diff --git a/Source/portable/IAR/ARM_CM33/non_secure/portmacro.h b/Source/portable/IAR/ARM_CM33/non_secure/portmacro.h
index 3575c1f..4eb1c72 100644
--- a/Source/portable/IAR/ARM_CM33/non_secure/portmacro.h
+++ b/Source/portable/IAR/ARM_CM33/non_secure/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -29,11 +29,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
+/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
-
-#include "portmacrocommon.h"
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -49,12 +49,12 @@
* Architecture specifics.
*/
#define portARCH_NAME "Cortex-M33"
+#define portHAS_BASEPRI 1
#define portDONT_DISCARD __root
/*-----------------------------------------------------------*/
-#if( configTOTAL_MPU_REGIONS == 16 )
- #error 16 MPU regions are not yet supported for this port.
-#endif
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
/*-----------------------------------------------------------*/
/**
@@ -71,8 +71,10 @@
#pragma diag_suppress=Pa082
/*-----------------------------------------------------------*/
+/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h b/Source/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h
index e68692a..6f666da 100644
--- a/Source/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h
+++ b/Source/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -27,11 +27,13 @@
*/
#ifndef PORTMACROCOMMON_H
- #define PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -43,209 +45,329 @@
*------------------------------------------------------------------------------
*/
- #ifndef configENABLE_FPU
- #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
- #endif /* configENABLE_FPU */
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
- #ifndef configENABLE_MPU
- #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
- #endif /* configENABLE_MPU */
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
- #ifndef configENABLE_TRUSTZONE
- #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
- #endif /* configENABLE_TRUSTZONE */
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
- #define portCHAR char
- #define portFLOAT float
- #define portDOUBLE double
- #define portLONG long
- #define portSHORT short
- #define portSTACK_TYPE uint32_t
- #define portBASE_TYPE long
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
- typedef portSTACK_TYPE StackType_t;
- typedef long BaseType_t;
- typedef unsigned long UBaseType_t;
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
- typedef uint16_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffff
- #else
- typedef uint32_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
- #define portTICK_TYPE_IS_ATOMIC 1
- #endif
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
- #define portSTACK_GROWTH ( -1 )
- #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
- #define portBYTE_ALIGNMENT 8
- #define portNOP()
- #define portINLINE __inline
- #ifndef portFORCE_INLINE
- #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
- #endif
- #define portHAS_STACK_OVERFLOW_CHECKING 1
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
* @brief Extern declarations.
*/
- extern BaseType_t xPortIsInsideInterrupt( void );
+extern BaseType_t xPortIsInsideInterrupt( void );
- extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- #if ( configENABLE_TRUSTZONE == 1 )
- extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
- extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
- #endif /* configENABLE_TRUSTZONE */
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
- #if ( configENABLE_MPU == 1 )
- extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
- extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/**
* @brief MPU specific constants.
*/
- #if ( configENABLE_MPU == 1 )
- #define portUSING_MPU_WRAPPERS 1
- #define portPRIVILEGE_BIT ( 0x80000000UL )
- #else
- #define portPRIVILEGE_BIT ( 0x0UL )
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
/* MPU settings that can be overriden in FreeRTOSConfig.h. */
#ifndef configTOTAL_MPU_REGIONS
/* Define to 8 for backward compatibility. */
- #define configTOTAL_MPU_REGIONS ( 8UL )
+ #define configTOTAL_MPU_REGIONS ( 8UL )
#endif
/* MPU regions. */
- #define portPRIVILEGED_FLASH_REGION ( 0UL )
- #define portUNPRIVILEGED_FLASH_REGION ( 1UL )
- #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
- #define portPRIVILEGED_RAM_REGION ( 3UL )
- #define portSTACK_REGION ( 4UL )
- #define portFIRST_CONFIGURABLE_REGION ( 5UL )
- #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
- #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
- #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
- * 01 --> Device-nGnRE
- * 10 --> Device-nGRE
- * 11 --> Device-GRE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
- #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
- #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
- #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
- #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
- #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
- #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
/* Attributes used in MPU_RBAR registers. */
- #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
- #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
- #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
- #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
- #define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
- #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
- #define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
- #define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
typedef struct MPURegionSettings
{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
-/**
- * @brief MPU settings as stored in the TCB.
- */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
} xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
* @brief SVC numbers.
*/
- #define portSVC_ALLOCATE_SECURE_CONTEXT 0
- #define portSVC_FREE_SECURE_CONTEXT 1
- #define portSVC_START_SCHEDULER 2
- #define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
- #define portYIELD() vPortYield()
- #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
- #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
- #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 )
- #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/**
* @brief Critical section management.
*/
- #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
- #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
- #define portENTER_CRITICAL() vPortEnterCritical()
- #define portEXIT_CRITICAL() vPortExitCritical()
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/**
* @brief Tickless idle/low power functionality.
*/
- #ifndef portSUPPRESS_TICKS_AND_SLEEP
- extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
- #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
- #endif
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
/*-----------------------------------------------------------*/
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
- #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/*-----------------------------------------------------------*/
- #if ( configENABLE_TRUSTZONE == 1 )
+#if ( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
@@ -256,7 +378,7 @@
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
- #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
@@ -264,18 +386,18 @@
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
- #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
- #endif /* configENABLE_TRUSTZONE */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
- #if ( configENABLE_MPU == 1 )
+#if ( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
- #define portIS_PRIVILEGED() xIsPrivileged()
+ #define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
@@ -284,28 +406,44 @@
* then it raises the privilege. If this is called from any other place,
* the privilege is not raised.
*/
- #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting the bit 0 of the CONTROL
* register.
*/
- #define portRESET_PRIVILEGE() vResetPrivilege()
- #else
- #define portIS_PRIVILEGED()
- #define portRAISE_PRIVILEGE()
- #define portRESET_PRIVILEGE()
- #endif /* configENABLE_MPU */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
* @brief Barriers.
*/
- #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
/*-----------------------------------------------------------*/
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/IAR/ARM_CM33/secure/secure_context.c b/Source/portable/IAR/ARM_CM33/secure/secure_context.c
index 1996693..e37dd96 100644
--- a/Source/portable/IAR/ARM_CM33/secure/secure_context.c
+++ b/Source/portable/IAR/ARM_CM33/secure/secure_context.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM33/secure/secure_context.h b/Source/portable/IAR/ARM_CM33/secure/secure_context.h
index de33d15..2220ea6 100644
--- a/Source/portable/IAR/ARM_CM33/secure/secure_context.h
+++ b/Source/portable/IAR/ARM_CM33/secure/secure_context.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM33/secure/secure_context_port_asm.s b/Source/portable/IAR/ARM_CM33/secure/secure_context_port_asm.s
index 4e26cf9..0da3e0f 100644
--- a/Source/portable/IAR/ARM_CM33/secure/secure_context_port_asm.s
+++ b/Source/portable/IAR/ARM_CM33/secure/secure_context_port_asm.s
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM33/secure/secure_heap.c b/Source/portable/IAR/ARM_CM33/secure/secure_heap.c
index b3bf007..19f7c23 100644
--- a/Source/portable/IAR/ARM_CM33/secure/secure_heap.c
+++ b/Source/portable/IAR/ARM_CM33/secure/secure_heap.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM33/secure/secure_heap.h b/Source/portable/IAR/ARM_CM33/secure/secure_heap.h
index e469f2c..75c9cb0 100644
--- a/Source/portable/IAR/ARM_CM33/secure/secure_heap.h
+++ b/Source/portable/IAR/ARM_CM33/secure/secure_heap.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM33/secure/secure_init.c b/Source/portable/IAR/ARM_CM33/secure/secure_init.c
index f6570d8..f93bfce 100644
--- a/Source/portable/IAR/ARM_CM33/secure/secure_init.c
+++ b/Source/portable/IAR/ARM_CM33/secure/secure_init.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM33/secure/secure_init.h b/Source/portable/IAR/ARM_CM33/secure/secure_init.h
index e89af71..e6c9da0 100644
--- a/Source/portable/IAR/ARM_CM33/secure/secure_init.h
+++ b/Source/portable/IAR/ARM_CM33/secure/secure_init.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM33/secure/secure_port_macros.h b/Source/portable/IAR/ARM_CM33/secure/secure_port_macros.h
index 2fb7c59..d7ac583 100644
--- a/Source/portable/IAR/ARM_CM33/secure/secure_port_macros.h
+++ b/Source/portable/IAR/ARM_CM33/secure/secure_port_macros.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S b/Source/portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..ef180bd
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1336 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+#include "mpu_syscall_numbers.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskDelayUntil
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskAbortDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskPriorityGet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_eTaskGetState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskGetInfo
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSuspend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskResume
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetTickCount
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTaskGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetSystemState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetSchedulerState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetTimeOutState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskCheckForTimeOut
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyEntry
+MPU_xTaskGenericNotifyEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotify
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWaitEntry
+MPU_xTaskGenericNotifyWaitEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyWait
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGenericSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueMessagesWaiting
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueuePeek
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSemaphoreTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGetMutexHolder
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueTakeMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGiveMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSelectFromSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueAddToSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueAddToRegistry
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueUnregisterQueue
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcQueueGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTimerGetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerIsTimerActive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommandEntry
+MPU_xTimerGenericCommandEntry:
+ push {r0}
+ /* This function can also be called from an ISR and therefore we need a
+ * check to take the privileged path when called from an ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGenericCommand
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandPrivImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTimerGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetPeriod
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetExpiryTime
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBitsEntry
+MPU_xEventGroupWaitBitsEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupWaitBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupClearBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSetBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSync
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxEventGroupGetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vEventGroupSetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsFull
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsEmpty
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferBytesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandPrivImpl
+MPU_xTimerGenericCommandPrivImpl:
+ b MPU_xTimerGenericCommandPrivImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/Source/portable/IAR/ARM_CM33_NTZ/non_secure/port.c b/Source/portable/IAR/ARM_CM33_NTZ/non_secure/port.c
index 349aeff..9712ac3 100644
--- a/Source/portable/IAR/ARM_CM33_NTZ/non_secure/port.c
+++ b/Source/portable/IAR/ARM_CM33_NTZ/non_secure/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
-/* MPU wrappers includes. */
+/* MPU includes. */
#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@@ -95,6 +96,26 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -111,6 +132,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -135,6 +164,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -178,6 +209,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -299,6 +354,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -352,8 +420,67 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
/**
* @brief Each task maintains its own interrupt status in the critical nesting
* variable.
@@ -369,6 +496,19 @@
PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
#endif /* configENABLE_TRUSTZONE */
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
#if ( configUSE_TICKLESS_IDLE == 1 )
/**
@@ -656,10 +796,29 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@@ -827,9 +986,8 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@@ -839,7 +997,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -854,7 +1012,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -925,18 +1083,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -944,131 +1102,546 @@
}
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Store the value of the PSPLIM register before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the privileged code (i.e. application is not
+ * raising SVC directly). This SVC is only raised from
+ * vRequestSystemCallExit which is in the privileged code section.
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+ * this means that we previously entered a system call and the
+ * application is not attempting to exit without entering a system
+ * call.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+ {
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Return to the caller of the System Call entry point (i.e. the
+ * caller of the MPU_<API>). */
+ pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ /* Ensure that LR has a valid value.*/
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* Restore the PSPLIM register to what it was at the time of
+ * system call entry. */
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile
+ (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+ }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
+
+ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+ }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+ }
+
+#else /* configENABLE_MPU */
+
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
+ {
+ /* Simulate the stack frame as it would be created by a context switch
+ * interrupt. */
+ #if ( portPRELOAD_REGISTERS == 0 )
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
+ *pxTopOfStack = portINITIAL_EXC_RETURN;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #else /* portPRELOAD_REGISTERS */
+ {
+ pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+ pxTopOfStack--;
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ pxTopOfStack--;
+ *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ }
+ #endif /* portPRELOAD_REGISTERS */
+
+ return pxTopOfStack;
+ }
+
#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
-{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- #if ( portPRELOAD_REGISTERS == 0 )
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #else /* portPRELOAD_REGISTERS */
- {
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
-
- #if ( configENABLE_TRUSTZONE == 1 )
- {
- pxTopOfStack--;
- *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
- }
- #endif /* configENABLE_TRUSTZONE */
- }
- #endif /* portPRELOAD_REGISTERS */
-
- return pxTopOfStack;
-}
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
{
+ #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+ {
+ volatile uint32_t ulOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine the maximum priority from which ISR safe FreeRTOS API
+ * functions can be called. ISR safe functions are those that end in
+ * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+ * ensure interrupt entry is as fast and simple as possible.
+ *
+ * Save the interrupt priority value that is about to be clobbered. */
+ ulOriginalPriority = portNVIC_SHPR2_REG;
+
+ /* Determine the number of priority bits available. First write to all
+ * possible bits. */
+ portNVIC_SHPR2_REG = 0xFF000000;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+ /* Use the same mask on the maximum system call priority. */
+ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+ /* Calculate the maximum acceptable priority group value for the number
+ * of bits read back. */
+
+ while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+ {
+ ulImplementedPrioBits++;
+ ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+ }
+
+ if( ulImplementedPrioBits == 8 )
+ {
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
+ }
+ else
+ {
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+ }
+
+ /* Shift the priority group value back to its position within the AIRCR
+ * register. */
+ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+ ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+ /* Restore the clobbered interrupt priority register to its original
+ * value. */
+ portNVIC_SHPR2_REG = ulOriginalPriority;
+ }
+ #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
/* Make PendSV, CallSV and SysTick the same priority as the kernel. */
portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
@@ -1087,6 +1660,12 @@
/* Initialize the critical nesting count ready for the first task. */
ulCriticalNesting = 0;
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+ {
+ xSchedulerRunning = pdTRUE;
+ }
+ #endif
+
/* Start the first task. */
vStartFirstTask();
@@ -1122,7 +1701,6 @@
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
-
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;
@@ -1237,6 +1815,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
@@ -1259,3 +1885,159 @@
return xReturn;
}
/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ void vPortValidateInterruptPriority( void )
+ {
+ uint32_t ulCurrentInterrupt;
+ uint8_t ucCurrentPriority;
+
+ /* Obtain the number of the currently executing interrupt. */
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+ /* Is the interrupt number a user defined interrupt? */
+ if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+ {
+ /* Look up the interrupt's priority. */
+ ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+ /* The following assertion will fail if a service routine (ISR) for
+ * an interrupt that has been assigned a priority above
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+ * function. ISR safe FreeRTOS API functions must *only* be called
+ * from interrupts that have been assigned a priority at or below
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Numerically low interrupt priority numbers represent logically high
+ * interrupt priorities, therefore the priority of the interrupt must
+ * be set to a value equal to or numerically *higher* than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ *
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
+ * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+ * and therefore also guaranteed to be invalid.
+ *
+ * FreeRTOS maintains separate thread and ISR API functions to ensure
+ * interrupt entry is as fast and simple as possible.
+ *
+ * The following links provide detailed information:
+ * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+ * https://www.FreeRTOS.org/FAQHelp.html */
+ configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+ }
+
+ /* Priority grouping: The interrupt controller (NVIC) allows the bits
+ * that define each interrupt's priority to be split between bits that
+ * define the interrupt's pre-emption priority bits and bits that define
+ * the interrupt's sub-priority. For simplicity all bits must be defined
+ * to be pre-emption priority bits. The following assertion will fail if
+ * this is not the case (if some bits represent a sub-priority).
+ *
+ * If the application only uses CMSIS libraries for interrupt
+ * configuration then the correct setting can be achieved on all Cortex-M
+ * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+ * scheduler. Note however that some vendor specific peripheral libraries
+ * assume a non-zero priority group setting, in which cases using a value
+ * of zero will result in unpredictable behaviour. */
+ configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+ }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings;
+
+ if( xSchedulerRunning == pdFALSE )
+ {
+ /* Grant access to all the kernel objects before the scheduler
+ * is started. It is necessary because there is no task running
+ * yet and therefore, we cannot use the permissions of any
+ * task. */
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+
+ #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ ( void ) lInternalIndexOfKernelObject;
+
+ /* If Access Control List feature is not used, all the tasks have
+ * access to all the kernel objects. */
+ return pdTRUE;
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.h b/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.h
index 93606b1..f64ceb5 100644
--- a/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.h
+++ b/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s b/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s
index 4d02a43..00ee5a5 100644
--- a/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s
+++ b/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -32,231 +32,371 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
- EXTERN pxCurrentTCB
- EXTERN vTaskSwitchContext
- EXTERN vPortSVCHandler_C
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
- PUBLIC xIsPrivileged
- PUBLIC vResetPrivilege
- PUBLIC vRestoreContextOfFirstTask
- PUBLIC vRaisePrivilege
- PUBLIC vStartFirstTask
- PUBLIC ulSetInterruptMask
- PUBLIC vClearInterruptMask
- PUBLIC PendSV_Handler
- PUBLIC SVC_Handler
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+ EXTERN pxCurrentTCB
+ EXTERN vTaskSwitchContext
+ EXTERN vPortSVCHandler_C
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallExit
+#endif
+
+ PUBLIC xIsPrivileged
+ PUBLIC vResetPrivilege
+ PUBLIC vRestoreContextOfFirstTask
+ PUBLIC vRaisePrivilege
+ PUBLIC vStartFirstTask
+ PUBLIC ulSetInterruptMask
+ PUBLIC vClearInterruptMask
+ PUBLIC PendSV_Handler
+ PUBLIC SVC_Handler
/*-----------------------------------------------------------*/
/*---------------- Unprivileged Functions -------------------*/
/*-----------------------------------------------------------*/
- SECTION .text:CODE:NOROOT(2)
- THUMB
+ SECTION .text:CODE:NOROOT(2)
+ THUMB
/*-----------------------------------------------------------*/
xIsPrivileged:
- mrs r0, control /* r0 = CONTROL. */
- tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
- ite ne
- movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
- moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is not privileged. */
- bx lr /* Return. */
+ mrs r0, control /* r0 = CONTROL. */
+ tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+ ite ne
+ movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
vResetPrivilege:
- mrs r0, control /* r0 = CONTROL. */
- orr r0, r0, #1 /* r0 = r0 | 1. */
- msr control, r0 /* CONTROL = r0. */
- bx lr /* Return to the caller. */
+ mrs r0, control /* r0 = CONTROL. */
+ orr r0, r0, #1 /* r0 = r0 | 1. */
+ msr control, r0 /* CONTROL = r0. */
+ bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
/*----------------- Privileged Functions --------------------*/
/*-----------------------------------------------------------*/
- SECTION privileged_functions:CODE:NOROOT(2)
- THUMB
+ SECTION privileged_functions:CODE:NOROOT(2)
+ THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
vRestoreContextOfFirstTask:
- ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r2] /* Read pxCurrentTCB. */
- ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ program_mpu_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r3, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r3, #4 /* r3 = 4. */
- str r3, [r2] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- msr psplim, r1 /* Set this task's PSPLIM value. */
- msr control, r2 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r3 /* Finally, branch to EXC_RETURN. */
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs_first_task:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
#else /* configENABLE_MPU */
- ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
- msr psplim, r1 /* Set this task's PSPLIM value. */
- movs r1, #2 /* r1 = 2. */
- msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r2 /* Finally, branch to EXC_RETURN. */
+
+vRestoreContextOfFirstTask:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r2] /* Read pxCurrentTCB. */
+ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+
+ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ msr psplim, r1 /* Set this task's PSPLIM value. */
+ movs r1, #2 /* r1 = 2. */
+ msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
+ adds r0, #32 /* Discard everything up to r0. */
+ msr psp, r0 /* This is now the new top of stack to use in the task. */
+ isb
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx r2 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
vRaisePrivilege:
- mrs r0, control /* Read the CONTROL register. */
- bic r0, r0, #1 /* Clear the bit 0. */
- msr control, r0 /* Write back the new CONTROL value. */
- bx lr /* Return to the caller. */
+ mrs r0, control /* Read the CONTROL register. */
+ bic r0, r0, #1 /* Clear the bit 0. */
+ msr control, r0 /* Write back the new CONTROL value. */
+ bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
vStartFirstTask:
- ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
- ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
- ldr r0, [r0] /* The first entry in vector table is stack pointer. */
- msr msp, r0 /* Set the MSP back to the start of the stack. */
- cpsie i /* Globally enable interrupts. */
- cpsie f
- dsb
- isb
- svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
+ ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
+ ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
+ ldr r0, [r0] /* The first entry in vector table is stack pointer. */
+ msr msp, r0 /* Set the MSP back to the start of the stack. */
+ cpsie i /* Globally enable interrupts. */
+ cpsie f
+ dsb
+ isb
+ svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
/*-----------------------------------------------------------*/
ulSetInterruptMask:
- mrs r0, basepri /* r0 = basepri. Return original basepri value. */
- mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY
- msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
- dsb
- isb
- bx lr /* Return. */
+ mrs r0, basepri /* r0 = basepri. Return original basepri value. */
+ mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
vClearInterruptMask:
- msr basepri, r0 /* basepri = ulMask. */
- dsb
- isb
- bx lr /* Return. */
+ msr basepri, r0 /* basepri = ulMask. */
+ dsb
+ isb
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
PendSV_Handler:
- mrs r0, psp /* Read PSP in r0. */
-#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
- tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
- it eq
- vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
-#endif /* configENABLE_FPU || configENABLE_MVE */
-#if ( configENABLE_MPU == 1 )
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r2, control /* r2 = CONTROL. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */
+ mrs r2, psp /* r2 = PSP. */
+
+ save_general_regs:
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r1!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */
+ sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r1!, {r4-r11} /* Store r4-r11. */
+ ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r1!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psplim /* r3 = PSPLIM. */
+ mrs r4, control /* r4 = CONTROL. */
+ stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
#else /* configENABLE_MPU */
- mrs r2, psplim /* r2 = PSPLIM. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */
-#endif /* configENABLE_MPU */
- ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r2] /* Read pxCurrentTCB. */
- str r0, [r1] /* Save the new top of stack in TCB. */
-
- mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
- msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
- dsb
- isb
- bl vTaskSwitchContext
- mov r0, #0 /* r0 = 0. */
- msr basepri, r0 /* Enable interrupts. */
-
- ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r2] /* Read pxCurrentTCB. */
- ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
-
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r3, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r3, #4 /* r3 = 4. */
- str r3, [r2] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
-#else /* configENABLE_MPU */
- ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
-#endif /* configENABLE_MPU */
-
+PendSV_Handler:
+ mrs r0, psp /* Read PSP in r0. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
- tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
- it eq
- vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
+ tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ it eq
+ vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r2 /* Restore the CONTROL register value for the task. */
-#else /* configENABLE_MPU */
- msr psplim, r2 /* Restore the PSPLIM register value for the task. */
+ mrs r2, psplim /* r2 = PSPLIM. */
+ mov r3, lr /* r3 = LR/EXC_RETURN. */
+ stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
+
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r2] /* Read pxCurrentTCB. */
+ str r0, [r1] /* Save the new top of stack in TCB. */
+
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r2] /* Read pxCurrentTCB. */
+ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
+
+ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
+
+#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ it eq
+ vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
+#endif /* configENABLE_FPU || configENABLE_MVE */
+
+ msr psplim, r2 /* Restore the PSPLIM register value for the task. */
+ msr psp, r0 /* Remember the new top of stack for the task. */
+ bx r3
+
#endif /* configENABLE_MPU */
- msr psp, r0 /* Remember the new top of stack for the task. */
- bx r3
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
SVC_Handler:
- tst lr, #4
- ite eq
- mrseq r0, msp
- mrsne r0, psp
- b vPortSVCHandler_C
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #NUM_SYSTEM_CALLS
+ blt syscall_enter
+ cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+ b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
- END
+ END
diff --git a/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h b/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h
index 3575c1f..4eb1c72 100644
--- a/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h
+++ b/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -29,11 +29,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
+/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
-
-#include "portmacrocommon.h"
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -49,12 +49,12 @@
* Architecture specifics.
*/
#define portARCH_NAME "Cortex-M33"
+#define portHAS_BASEPRI 1
#define portDONT_DISCARD __root
/*-----------------------------------------------------------*/
-#if( configTOTAL_MPU_REGIONS == 16 )
- #error 16 MPU regions are not yet supported for this port.
-#endif
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
/*-----------------------------------------------------------*/
/**
@@ -71,8 +71,10 @@
#pragma diag_suppress=Pa082
/*-----------------------------------------------------------*/
+/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h
index e68692a..6f666da 100644
--- a/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h
+++ b/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -27,11 +27,13 @@
*/
#ifndef PORTMACROCOMMON_H
- #define PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*------------------------------------------------------------------------------
* Port specific definitions.
@@ -43,209 +45,329 @@
*------------------------------------------------------------------------------
*/
- #ifndef configENABLE_FPU
- #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
- #endif /* configENABLE_FPU */
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
- #ifndef configENABLE_MPU
- #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
- #endif /* configENABLE_MPU */
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
- #ifndef configENABLE_TRUSTZONE
- #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
- #endif /* configENABLE_TRUSTZONE */
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
- #define portCHAR char
- #define portFLOAT float
- #define portDOUBLE double
- #define portLONG long
- #define portSHORT short
- #define portSTACK_TYPE uint32_t
- #define portBASE_TYPE long
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
- typedef portSTACK_TYPE StackType_t;
- typedef long BaseType_t;
- typedef unsigned long UBaseType_t;
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
- typedef uint16_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffff
- #else
- typedef uint32_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
- #define portTICK_TYPE_IS_ATOMIC 1
- #endif
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
- #define portSTACK_GROWTH ( -1 )
- #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
- #define portBYTE_ALIGNMENT 8
- #define portNOP()
- #define portINLINE __inline
- #ifndef portFORCE_INLINE
- #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
- #endif
- #define portHAS_STACK_OVERFLOW_CHECKING 1
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
* @brief Extern declarations.
*/
- extern BaseType_t xPortIsInsideInterrupt( void );
+extern BaseType_t xPortIsInsideInterrupt( void );
- extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
- extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
- #if ( configENABLE_TRUSTZONE == 1 )
- extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
- extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
- #endif /* configENABLE_TRUSTZONE */
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
- #if ( configENABLE_MPU == 1 )
- extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
- extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/**
* @brief MPU specific constants.
*/
- #if ( configENABLE_MPU == 1 )
- #define portUSING_MPU_WRAPPERS 1
- #define portPRIVILEGE_BIT ( 0x80000000UL )
- #else
- #define portPRIVILEGE_BIT ( 0x0UL )
- #endif /* configENABLE_MPU */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
/* MPU settings that can be overriden in FreeRTOSConfig.h. */
#ifndef configTOTAL_MPU_REGIONS
/* Define to 8 for backward compatibility. */
- #define configTOTAL_MPU_REGIONS ( 8UL )
+ #define configTOTAL_MPU_REGIONS ( 8UL )
#endif
/* MPU regions. */
- #define portPRIVILEGED_FLASH_REGION ( 0UL )
- #define portUNPRIVILEGED_FLASH_REGION ( 1UL )
- #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
- #define portPRIVILEGED_RAM_REGION ( 3UL )
- #define portSTACK_REGION ( 4UL )
- #define portFIRST_CONFIGURABLE_REGION ( 5UL )
- #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
- #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
- #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
- * 01 --> Device-nGnRE
- * 10 --> Device-nGRE
- * 11 --> Device-GRE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
- #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
- #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
- #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
- #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
- #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
- #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
/* Attributes used in MPU_RBAR registers. */
- #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
- #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
- #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
- #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
- #define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
- #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
- #define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
- #define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
typedef struct MPURegionSettings
{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
-/**
- * @brief MPU settings as stored in the TCB.
- */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
} xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
* @brief SVC numbers.
*/
- #define portSVC_ALLOCATE_SECURE_CONTEXT 0
- #define portSVC_FREE_SECURE_CONTEXT 1
- #define portSVC_START_SCHEDULER 2
- #define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
- #define portYIELD() vPortYield()
- #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
- #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
- #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 )
- #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/**
* @brief Critical section management.
*/
- #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
- #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
- #define portENTER_CRITICAL() vPortEnterCritical()
- #define portEXIT_CRITICAL() vPortExitCritical()
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/**
* @brief Tickless idle/low power functionality.
*/
- #ifndef portSUPPRESS_TICKS_AND_SLEEP
- extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
- #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
- #endif
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
/*-----------------------------------------------------------*/
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
- #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/*-----------------------------------------------------------*/
- #if ( configENABLE_TRUSTZONE == 1 )
+#if ( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
@@ -256,7 +378,7 @@
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
- #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
@@ -264,18 +386,18 @@
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
- #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
- #endif /* configENABLE_TRUSTZONE */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
- #if ( configENABLE_MPU == 1 )
+#if ( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
- #define portIS_PRIVILEGED() xIsPrivileged()
+ #define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
@@ -284,28 +406,44 @@
* then it raises the privilege. If this is called from any other place,
* the privilege is not raised.
*/
- #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting the bit 0 of the CONTROL
* register.
*/
- #define portRESET_PRIVILEGE() vResetPrivilege()
- #else
- #define portIS_PRIVILEGED()
- #define portRAISE_PRIVILEGE()
- #define portRESET_PRIVILEGE()
- #endif /* configENABLE_MPU */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
* @brief Barriers.
*/
- #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
/*-----------------------------------------------------------*/
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/IAR/ARM_CM4F/port.c b/Source/portable/IAR/ARM_CM4F/port.c
index 79bdf68..692400b 100644
--- a/Source/portable/IAR/ARM_CM4F/port.c
+++ b/Source/portable/IAR/ARM_CM4F/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -65,8 +65,9 @@
#define portCORTEX_M7_r0p1_ID ( 0x410FC271UL )
#define portCORTEX_M7_r0p0_ID ( 0x410FC270UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
/* Constants required to check the validity of an interrupt priority. */
#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
@@ -239,10 +240,6 @@
*/
BaseType_t xPortStartScheduler( void )
{
- /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0.
- * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
- configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY );
-
/* This port can be used on all revisions of the Cortex-M7 core other than
* the r0p1 parts. r0p1 parts should use the port from the
* /source/portable/GCC/ARM_CM7/r0p1 directory. */
@@ -251,7 +248,8 @@
#if ( configASSERT_DEFINED == 1 )
{
- volatile uint32_t ulOriginalPriority;
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
volatile uint8_t ucMaxPriorityValue;
@@ -261,7 +259,7 @@
* ensure interrupt entry is as fast and simple as possible.
*
* Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
/* Determine the number of priority bits available. First write to all
* possible bits. */
@@ -273,33 +271,53 @@
/* Use the same mask on the maximum system call priority. */
ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
/* Calculate the maximum acceptable priority group value for the number
* of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- ulMaxPRIGROUPValue--;
+ ulImplementedPrioBits++;
ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
- #ifdef __NVIC_PRIO_BITS
+ if( ulImplementedPrioBits == 8 )
{
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
}
- #endif
-
- #ifdef configPRIO_BITS
+ else
{
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
}
- #endif
/* Shift the priority group value back to its position within the AIRCR
* register. */
@@ -308,7 +326,7 @@
/* Restore the clobbered interrupt priority register to its original
* value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
}
#endif /* configASSERT_DEFINED */
@@ -668,10 +686,10 @@
* be set to a value equal to or numerically *higher* than
* configMAX_SYSCALL_INTERRUPT_PRIORITY.
*
- * Interrupts that use the FreeRTOS API must not be left at their
- * default priority of zero as that is the highest possible priority,
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
* which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
- * and therefore also guaranteed to be invalid.
+ * and therefore also guaranteed to be invalid.
*
* FreeRTOS maintains separate thread and ISR API functions to ensure
* interrupt entry is as fast and simple as possible.
diff --git a/Source/portable/IAR/ARM_CM4F/portasm.s b/Source/portable/IAR/ARM_CM4F/portasm.s
index 7ac74ff..89b72b2 100644
--- a/Source/portable/IAR/ARM_CM4F/portasm.s
+++ b/Source/portable/IAR/ARM_CM4F/portasm.s
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -28,123 +28,122 @@
#include <FreeRTOSConfig.h>
- RSEG CODE:CODE(2)
- thumb
+ RSEG CODE:CODE(2)
+ thumb
- EXTERN pxCurrentTCB
- EXTERN vTaskSwitchContext
+ EXTERN pxCurrentTCB
+ EXTERN vTaskSwitchContext
- PUBLIC xPortPendSVHandler
- PUBLIC vPortSVCHandler
- PUBLIC vPortStartFirstTask
- PUBLIC vPortEnableVFP
+ PUBLIC xPortPendSVHandler
+ PUBLIC vPortSVCHandler
+ PUBLIC vPortStartFirstTask
+ PUBLIC vPortEnableVFP
/*-----------------------------------------------------------*/
xPortPendSVHandler:
- mrs r0, psp
- isb
- /* Get the location of the current TCB. */
- ldr r3, =pxCurrentTCB
- ldr r2, [r3]
+ mrs r0, psp
+ isb
+ /* Get the location of the current TCB. */
+ ldr r3, =pxCurrentTCB
+ ldr r2, [r3]
- /* Is the task using the FPU context? If so, push high vfp registers. */
- tst r14, #0x10
- it eq
- vstmdbeq r0!, {s16-s31}
+ /* Is the task using the FPU context? If so, push high vfp registers. */
+ tst r14, #0x10
+ it eq
+ vstmdbeq r0!, {s16-s31}
- /* Save the core registers. */
- stmdb r0!, {r4-r11, r14}
+ /* Save the core registers. */
+ stmdb r0!, {r4-r11, r14}
- /* Save the new top of stack into the first member of the TCB. */
- str r0, [r2]
+ /* Save the new top of stack into the first member of the TCB. */
+ str r0, [r2]
- stmdb sp!, {r0, r3}
- mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
- msr basepri, r0
- dsb
- isb
- bl vTaskSwitchContext
- mov r0, #0
- msr basepri, r0
- ldmia sp!, {r0, r3}
+ stmdb sp!, {r0, r3}
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0
+ msr basepri, r0
+ ldmia sp!, {r0, r3}
- /* The first item in pxCurrentTCB is the task top of stack. */
- ldr r1, [r3]
- ldr r0, [r1]
+ /* The first item in pxCurrentTCB is the task top of stack. */
+ ldr r1, [r3]
+ ldr r0, [r1]
- /* Pop the core registers. */
- ldmia r0!, {r4-r11, r14}
+ /* Pop the core registers. */
+ ldmia r0!, {r4-r11, r14}
- /* Is the task using the FPU context? If so, pop the high vfp registers
- too. */
- tst r14, #0x10
- it eq
- vldmiaeq r0!, {s16-s31}
+ /* Is the task using the FPU context? If so, pop the high vfp registers
+ too. */
+ tst r14, #0x10
+ it eq
+ vldmiaeq r0!, {s16-s31}
- msr psp, r0
- isb
- #ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata */
- #if WORKAROUND_PMU_CM001 == 1
- push { r14 }
- pop { pc }
- #endif
- #endif
+ msr psp, r0
+ isb
+ #ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata */
+ #if WORKAROUND_PMU_CM001 == 1
+ push { r14 }
+ pop { pc }
+ #endif
+ #endif
- bx r14
+ bx r14
/*-----------------------------------------------------------*/
vPortSVCHandler:
- /* Get the location of the current TCB. */
- ldr r3, =pxCurrentTCB
- ldr r1, [r3]
- ldr r0, [r1]
- /* Pop the core registers. */
- ldmia r0!, {r4-r11, r14}
- msr psp, r0
- isb
- mov r0, #0
- msr basepri, r0
- bx r14
+ /* Get the location of the current TCB. */
+ ldr r3, =pxCurrentTCB
+ ldr r1, [r3]
+ ldr r0, [r1]
+ /* Pop the core registers. */
+ ldmia r0!, {r4-r11, r14}
+ msr psp, r0
+ isb
+ mov r0, #0
+ msr basepri, r0
+ bx r14
/*-----------------------------------------------------------*/
vPortStartFirstTask
- /* Use the NVIC offset register to locate the stack. */
- ldr r0, =0xE000ED08
- ldr r0, [r0]
- ldr r0, [r0]
- /* Set the msp back to the start of the stack. */
- msr msp, r0
- /* Clear the bit that indicates the FPU is in use in case the FPU was used
- before the scheduler was started - which would otherwise result in the
- unnecessary leaving of space in the SVC stack for lazy saving of FPU
- registers. */
- mov r0, #0
- msr control, r0
- /* Call SVC to start the first task. */
- cpsie i
- cpsie f
- dsb
- isb
- svc 0
+ /* Use the NVIC offset register to locate the stack. */
+ ldr r0, =0xE000ED08
+ ldr r0, [r0]
+ ldr r0, [r0]
+ /* Set the msp back to the start of the stack. */
+ msr msp, r0
+ /* Clear the bit that indicates the FPU is in use in case the FPU was used
+ before the scheduler was started - which would otherwise result in the
+ unnecessary leaving of space in the SVC stack for lazy saving of FPU
+ registers. */
+ mov r0, #0
+ msr control, r0
+ /* Call SVC to start the first task. */
+ cpsie i
+ cpsie f
+ dsb
+ isb
+ svc 0
/*-----------------------------------------------------------*/
vPortEnableVFP:
- /* The FPU enable bits are in the CPACR. */
- ldr.w r0, =0xE000ED88
- ldr r1, [r0]
+ /* The FPU enable bits are in the CPACR. */
+ ldr.w r0, =0xE000ED88
+ ldr r1, [r0]
- /* Enable CP10 and CP11 coprocessors, then save back. */
- orr r1, r1, #( 0xf << 20 )
- str r1, [r0]
- bx r14
+ /* Enable CP10 and CP11 coprocessors, then save back. */
+ orr r1, r1, #( 0xf << 20 )
+ str r1, [r0]
+ bx r14
- END
-
+ END
diff --git a/Source/portable/IAR/ARM_CM4F/portmacro.h b/Source/portable/IAR/ARM_CM4F/portmacro.h
index 07779bb..92cb7c7 100644
--- a/Source/portable/IAR/ARM_CM4F/portmacro.h
+++ b/Source/portable/IAR/ARM_CM4F/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -29,9 +29,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*-----------------------------------------------------------
* Port specific definitions.
@@ -59,16 +61,18 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
+ #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
- #else
+ #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+ #else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
@@ -200,8 +204,10 @@
#pragma diag_suppress=Pe191
#pragma diag_suppress=Pa082
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM4_MPU/mpu_wrappers_v2_asm.S b/Source/portable/IAR/ARM_CM4_MPU/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..276a1cf
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM4_MPU/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1340 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Including FreeRTOSConfig.h here will cause build errors if the header file
+ * contains code not understood by the assembler - for example the 'extern' keyword.
+ * To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so
+ * the code is included in C files but excluded by the preprocessor in assembly
+ * files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
+#include "FreeRTOSConfig.h"
+#include "mpu_syscall_numbers.h"
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskDelayUntil
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskAbortDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskPriorityGet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_eTaskGetState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskGetInfo
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSuspend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskResume
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetTickCount
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTaskGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetSystemState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetSchedulerState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetTimeOutState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskCheckForTimeOut
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyEntry
+MPU_xTaskGenericNotifyEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotify
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWaitEntry
+MPU_xTaskGenericNotifyWaitEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyWait
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGenericSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueMessagesWaiting
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueuePeek
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSemaphoreTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGetMutexHolder
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueTakeMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGiveMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSelectFromSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueAddToSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueAddToRegistry
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueUnregisterQueue
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcQueueGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTimerGetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerIsTimerActive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommandEntry
+MPU_xTimerGenericCommandEntry:
+ push {r0}
+ /* This function can be called from ISR also and therefore, we need a check
+ * to take privileged path, if called from ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGenericCommand
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandPrivImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTimerGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetPeriod
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetExpiryTime
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBitsEntry
+MPU_xEventGroupWaitBitsEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupWaitBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupClearBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSetBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSync
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxEventGroupGetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vEventGroupSetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsFull
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsEmpty
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferBytesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandPrivImpl
+MPU_xTimerGenericCommandPrivImpl:
+ b MPU_xTimerGenericCommandPrivImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/Source/portable/IAR/ARM_CM4_MPU/port.c b/Source/portable/IAR/ARM_CM4_MPU/port.c
index f6dae02..518dfbc 100644
--- a/Source/portable/IAR/ARM_CM4_MPU/port.c
+++ b/Source/portable/IAR/ARM_CM4_MPU/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -41,6 +41,7 @@
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "task.h"
+#include "mpu_syscall_numbers.h"
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
@@ -104,8 +105,9 @@
#define portCORTEX_M7_r0p1_ID ( 0x410FC271UL )
#define portCORTEX_M7_r0p0_ID ( 0x410FC270UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
#define portNVIC_SVC_PRI ( ( ( uint32_t ) configMAX_SYSCALL_INTERRUPT_PRIORITY - 1UL ) << 24UL )
/* Constants required to check the validity of an interrupt priority. */
@@ -131,8 +133,14 @@
#define portINITIAL_CONTROL_IF_UNPRIVILEGED ( 0x03 )
#define portINITIAL_CONTROL_IF_PRIVILEGED ( 0x02 )
+/* Constants used during system call enter and exit. */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+
/* Offsets in the stack to the parameters when inside the SVC handler. */
+#define portOFFSET_TO_LR ( 5 )
#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
/* The systick is a 24-bit counter. */
#define portMAX_24_BIT_NUMBER ( 0xffffffUL )
@@ -146,6 +154,21 @@
* have bit-0 clear, as it is loaded into the PC on exit from an ISR. */
#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL )
+/* Does addr lie within [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
/*
* Configure a number of standard MPU regions that are used by all tasks.
*/
@@ -183,7 +206,7 @@
/*
* The C portion of the SVC handler.
*/
-void vPortSVCHandler_C( uint32_t * pulParam );
+void vPortSVCHandler_C( uint32_t * pulParam ) PRIVILEGED_FUNCTION;
/*
* Called from the SVC handler used to start the scheduler.
@@ -193,7 +216,7 @@
/**
* @brief Enter critical section.
*/
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
void vPortEnterCritical( void ) FREERTOS_SYSTEM_CALL;
#else
void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
@@ -202,17 +225,73 @@
/**
* @brief Exit from critical section.
*/
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
void vPortExitCritical( void ) FREERTOS_SYSTEM_CALL;
#else
void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
#endif
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
/*-----------------------------------------------------------*/
/* Each task maintains its own interrupt status in the critical nesting
* variable. */
static UBaseType_t uxCriticalNesting = 0xaaaaaaaa;
+#if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/*
+ * This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
/*
* Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
* FreeRTOS API functions are not called from interrupts that have been assigned
@@ -232,54 +311,64 @@
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged )
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings )
{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
-
- /* Offset added to account for the way the MCU uses the stack on entry/exit
- * of interrupts, and to ensure alignment. */
- pxTopOfStack--;
-
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0; /* LR */
-
- /* Save code space by skipping register initialisation. */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
-
- /* A save method is being used that requires each task to maintain its
- * own exec return value. */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */
-
if( xRunPrivileged == pdTRUE )
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED;
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED;
}
else
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED;
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED;
}
+ xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */
+ xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */
+ xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */
+ xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */
+ xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */
+ xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */
+ xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */
+ xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */
+ xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
- return pxTopOfStack;
+ xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */
+ xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */
+ xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */
+ xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */
+ xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */
+ xMPUSettings->ulContext[ 16 ] = 0; /* LR. */
+ xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */
+ xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ return &( xMPUSettings->ulContext[ 19 ] );
}
/*-----------------------------------------------------------*/
-void vPortSVCHandler_C( uint32_t * pulParam )
+void vPortSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
{
uint8_t ucSVCNumber;
uint32_t ulPC;
- #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
+ #if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) )
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
- #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ #endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) ) */
/* The stack contains: r0, r1, r2, r3, r12, LR, PC and xPSR. The first
* argument (r0) is pulParam[ 0 ]. */
@@ -305,51 +394,298 @@
break;
- #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
- case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the
- * svc was raised from any of the
- * system calls. */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 1 )
+ #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
+ case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the
+ * svc was raised from any of the
+ * system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- __asm volatile
- (
- " mrs r1, control \n"/* Obtain current control value. */
- " bic r1, r1, #1 \n"/* Set privilege bit. */
- " msr control, r1 \n"/* Write back new control value. */
- ::: "r1", "memory"
- );
- }
-
- break;
- #else /* if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
- case portSVC_RAISE_PRIVILEGE:
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
__asm volatile
(
- " mrs r1, control \n"/* Obtain current control value. */
- " bic r1, r1, #1 \n"/* Set privilege bit. */
- " msr control, r1 \n"/* Write back new control value. */
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, r1, #1 \n" /* Set privilege bit. */
+ " msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
- break;
- #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ }
- default: /* Unknown SVC call. */
- break;
+ break;
+ #else /* if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ case portSVC_RAISE_PRIVILEGE:
+ __asm volatile
+ (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, r1, #1 \n" /* Set privilege bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+ break;
+ #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+ default: /* Unknown SVC call. */
+ break;
}
}
/*-----------------------------------------------------------*/
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile
+ (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Clear nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+ {
+ __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __privileged_functions_start__;
+ extern uint32_t * __privileged_functions_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __privileged_functions_start__[];
+ extern uint32_t __privileged_functions_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the privileged code (i.e. application is not
+ * raising SVC directly). This SVC is only raised from
+ * vRequestSystemCallExit which is in the privileged code section.
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+ * this means that we previously entered a system call and the
+ * application is not attempting to exit without entering a system
+ * call.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+ {
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile
+ (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile
+ (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " orr r1, #1 \n" /* Set nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Return to the caller of the System Call entry point (i.e. the
+ * caller of the MPU_<API>). */
+ pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ /* Ensure that LR has a valid value.*/
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+/*-----------------------------------------------------------*/
+
/*
* See header file for description.
*/
BaseType_t xPortStartScheduler( void )
{
- /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0.
- * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
- configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY );
-
/* Errata 837070 workaround must only be enabled on Cortex-M7 r0p0
* and r0p1 cores. */
#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
@@ -363,66 +699,87 @@
#endif
#if ( configASSERT_DEFINED == 1 )
+ {
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
+ volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine the maximum priority from which ISR safe FreeRTOS API
+ * functions can be called. ISR safe functions are those that end in
+ * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+ * ensure interrupt entry is as fast and simple as possible.
+ *
+ * Save the interrupt priority value that is about to be clobbered. */
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
+
+ /* Determine the number of priority bits available. First write to all
+ * possible bits. */
+ *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = *pucFirstUserPriorityRegister;
+
+ /* Use the same mask on the maximum system call priority. */
+ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+ /* Calculate the maximum acceptable priority group value for the number
+ * of bits read back. */
+
+ while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- volatile uint32_t ulOriginalPriority;
- volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
- volatile uint8_t ucMaxPriorityValue;
-
- /* Determine the maximum priority from which ISR safe FreeRTOS API
- * functions can be called. ISR safe functions are those that end in
- * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
- * ensure interrupt entry is as fast and simple as possible.
- *
- * Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
-
- /* Determine the number of priority bits available. First write to all
- * possible bits. */
- *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
-
- /* Read the value back to see how many bits stuck. */
- ucMaxPriorityValue = *pucFirstUserPriorityRegister;
-
- /* Use the same mask on the maximum system call priority. */
- ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
-
- /* Calculate the maximum acceptable priority group value for the number
- * of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
-
- while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
- {
- ulMaxPRIGROUPValue--;
- ucMaxPriorityValue <<= ( uint8_t ) 0x01;
- }
-
- #ifdef __NVIC_PRIO_BITS
- {
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
- }
- #endif
-
- #ifdef configPRIO_BITS
- {
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
- }
- #endif
-
- /* Shift the priority group value back to its position within the AIRCR
- * register. */
- ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
- ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
-
- /* Restore the clobbered interrupt priority register to its original
- * value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ ulImplementedPrioBits++;
+ ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
+
+ if( ulImplementedPrioBits == 8 )
+ {
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
+ }
+ else
+ {
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+ }
+
+ /* Shift the priority group value back to its position within the AIRCR
+ * register. */
+ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+ ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+ /* Restore the clobbered interrupt priority register to its original
+ * value. */
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
+ }
#endif /* configASSERT_DEFINED */
/* Make PendSV and SysTick the lowest priority interrupts. */
@@ -439,6 +796,12 @@
/* Initialise the critical nesting count ready for the first task. */
uxCriticalNesting = 0;
+ #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+ {
+ xSchedulerRunning = pdTRUE;
+ }
+ #endif
+
/* Ensure the VFP is enabled - it should be anyway. */
vPortEnableVFP();
@@ -463,14 +826,49 @@
void vPortEnterCritical( void )
{
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
- if( portIS_PRIVILEGED() == pdFALSE )
- {
- portRAISE_PRIVILEGE();
- portMEMORY_BARRIER();
+ #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+ if( portIS_PRIVILEGED() == pdFALSE )
+ {
+ portRAISE_PRIVILEGE();
+ portMEMORY_BARRIER();
+ portDISABLE_INTERRUPTS();
+ uxCriticalNesting++;
+
+ /* This is not the interrupt safe version of the enter critical function so
+ * assert() if it is being called from an interrupt context. Only API
+ * functions that end in "FromISR" can be used in an interrupt. Only assert if
+ * the critical nesting count is 1 to protect against recursive calls if the
+ * assert function also uses a critical section. */
+ if( uxCriticalNesting == 1 )
+ {
+ configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 );
+ }
+
+ portMEMORY_BARRIER();
+
+ portRESET_PRIVILEGE();
+ portMEMORY_BARRIER();
+ }
+ else
+ {
+ portDISABLE_INTERRUPTS();
+ uxCriticalNesting++;
+
+ /* This is not the interrupt safe version of the enter critical function so
+ * assert() if it is being called from an interrupt context. Only API
+ * functions that end in "FromISR" can be used in an interrupt. Only assert if
+ * the critical nesting count is 1 to protect against recursive calls if the
+ * assert function also uses a critical section. */
+ if( uxCriticalNesting == 1 )
+ {
+ configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 );
+ }
+ }
+ #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
portDISABLE_INTERRUPTS();
uxCriticalNesting++;
+
/* This is not the interrupt safe version of the enter critical function so
* assert() if it is being called from an interrupt context. Only API
* functions that end in "FromISR" can be used in an interrupt. Only assert if
@@ -480,49 +878,42 @@
{
configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 );
}
- portMEMORY_BARRIER();
-
- portRESET_PRIVILEGE();
- portMEMORY_BARRIER();
- }
- else
- {
- portDISABLE_INTERRUPTS();
- uxCriticalNesting++;
- /* This is not the interrupt safe version of the enter critical function so
- * assert() if it is being called from an interrupt context. Only API
- * functions that end in "FromISR" can be used in an interrupt. Only assert if
- * the critical nesting count is 1 to protect against recursive calls if the
- * assert function also uses a critical section. */
- if( uxCriticalNesting == 1 )
- {
- configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 );
- }
- }
-#else
- portDISABLE_INTERRUPTS();
- uxCriticalNesting++;
- /* This is not the interrupt safe version of the enter critical function so
- * assert() if it is being called from an interrupt context. Only API
- * functions that end in "FromISR" can be used in an interrupt. Only assert if
- * the critical nesting count is 1 to protect against recursive calls if the
- * assert function also uses a critical section. */
- if( uxCriticalNesting == 1 )
- {
- configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 );
- }
-#endif
+ #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
}
/*-----------------------------------------------------------*/
void vPortExitCritical( void )
{
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
- if( portIS_PRIVILEGED() == pdFALSE )
- {
- portRAISE_PRIVILEGE();
- portMEMORY_BARRIER();
+ #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+ if( portIS_PRIVILEGED() == pdFALSE )
+ {
+ portRAISE_PRIVILEGE();
+ portMEMORY_BARRIER();
+ configASSERT( uxCriticalNesting );
+ uxCriticalNesting--;
+
+ if( uxCriticalNesting == 0 )
+ {
+ portENABLE_INTERRUPTS();
+ }
+
+ portMEMORY_BARRIER();
+
+ portRESET_PRIVILEGE();
+ portMEMORY_BARRIER();
+ }
+ else
+ {
+ configASSERT( uxCriticalNesting );
+ uxCriticalNesting--;
+
+ if( uxCriticalNesting == 0 )
+ {
+ portENABLE_INTERRUPTS();
+ }
+ }
+ #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
configASSERT( uxCriticalNesting );
uxCriticalNesting--;
@@ -530,30 +921,7 @@
{
portENABLE_INTERRUPTS();
}
- portMEMORY_BARRIER();
-
- portRESET_PRIVILEGE();
- portMEMORY_BARRIER();
- }
- else
- {
- configASSERT( uxCriticalNesting );
- uxCriticalNesting--;
-
- if( uxCriticalNesting == 0 )
- {
- portENABLE_INTERRUPTS();
- }
- }
-#else
- configASSERT( uxCriticalNesting );
- uxCriticalNesting--;
-
- if( uxCriticalNesting == 0 )
- {
- portENABLE_INTERRUPTS();
- }
-#endif
+ #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
}
/*-----------------------------------------------------------*/
@@ -705,7 +1073,7 @@
xMPUSettings->xRegion[ 0 ].ulRegionBaseAddress =
( ( uint32_t ) __SRAM_segment_start__ ) | /* Base address. */
( portMPU_REGION_VALID ) |
- ( portSTACK_REGION ); /* Region number. */
+ ( portSTACK_REGION ); /* Region number. */
xMPUSettings->xRegion[ 0 ].ulRegionAttribute =
( portMPU_REGION_READ_WRITE ) |
@@ -714,11 +1082,19 @@
( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) |
( portMPU_REGION_ENABLE );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
+
/* Invalidate user configurable regions. */
for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ )
{
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
}
else
@@ -741,6 +1117,12 @@
( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) |
( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) +
+ ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
}
lIndex = 0;
@@ -761,12 +1143,30 @@
( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) |
( xRegions[ lIndex ].ulParameters ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL );
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
+
+ if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) ||
+ ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
}
else
{
/* Invalidate the region. */
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
lIndex++;
@@ -775,6 +1175,48 @@
}
/*-----------------------------------------------------------*/
+BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+{
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+}
+/*-----------------------------------------------------------*/
+
+
#if ( configASSERT_DEFINED == 1 )
void vPortValidateInterruptPriority( void )
@@ -803,10 +1245,10 @@
* be set to a value equal to or numerically *higher* than
* configMAX_SYSCALL_INTERRUPT_PRIORITY.
*
- * Interrupts that use the FreeRTOS API must not be left at their
- * default priority of zero as that is the highest possible priority,
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
* which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
- * and therefore also guaranteed to be invalid.
+ * and therefore also guaranteed to be invalid.
*
* FreeRTOS maintains separate thread and ISR API functions to ensure
* interrupt entry is as fast and simple as possible.
@@ -835,3 +1277,98 @@
#endif /* configASSERT_DEFINED */
/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings;
+
+ if( xSchedulerRunning == pdFALSE )
+ {
+ /* Grant access to all the kernel objects before the scheduler
+ * is started. It is necessary because there is no task running
+ * yet and therefore, we cannot use the permissions of any
+ * task. */
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+
+ #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ ( void ) lInternalIndexOfKernelObject;
+
+ /* If Access Control List feature is not used, all the tasks have
+ * access to all the kernel objects. */
+ return pdTRUE;
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM4_MPU/portasm.s b/Source/portable/IAR/ARM_CM4_MPU/portasm.s
index a399bf5..0da9a4f 100644
--- a/Source/portable/IAR/ARM_CM4_MPU/portasm.s
+++ b/Source/portable/IAR/ARM_CM4_MPU/portasm.s
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -25,240 +25,276 @@
* https://github.com/FreeRTOS
*
*/
+
/* Including FreeRTOSConfig.h here will cause build errors if the header file
contains code not understood by the assembler - for example the 'extern' keyword.
To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so
the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include <FreeRTOSConfig.h>
+#include <mpu_syscall_numbers.h>
- RSEG CODE:CODE(2)
- thumb
+ RSEG CODE:CODE(2)
+ thumb
- EXTERN pxCurrentTCB
- EXTERN vTaskSwitchContext
- EXTERN vPortSVCHandler_C
+ EXTERN pxCurrentTCB
+ EXTERN vTaskSwitchContext
+ EXTERN vPortSVCHandler_C
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallExit
- PUBLIC xPortPendSVHandler
- PUBLIC vPortSVCHandler
- PUBLIC vPortStartFirstTask
- PUBLIC vPortEnableVFP
- PUBLIC vPortRestoreContextOfFirstTask
- PUBLIC xIsPrivileged
- PUBLIC vResetPrivilege
+ PUBLIC xPortPendSVHandler
+ PUBLIC vPortSVCHandler
+ PUBLIC vPortStartFirstTask
+ PUBLIC vPortEnableVFP
+ PUBLIC vPortRestoreContextOfFirstTask
+ PUBLIC xIsPrivileged
+ PUBLIC vResetPrivilege
/*-----------------------------------------------------------*/
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_START_SCHEDULER 100
+#define portSVC_SYSTEM_CALL_EXIT 103
+/*-----------------------------------------------------------*/
+
xPortPendSVHandler:
- mrs r0, psp
- isb
- /* Get the location of the current TCB. */
- ldr r3, =pxCurrentTCB
- ldr r2, [r3]
- /* Is the task using the FPU context? If so, push high vfp registers. */
- tst r14, #0x10
- it eq
- vstmdbeq r0!, {s16-s31}
+ ldr r3, =pxCurrentTCB
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ ldr r1, [r2] /* r1 = Location where the context should be saved. */
- /* Save the core registers. */
- mrs r1, control
- stmdb r0!, {r1, r4-r11, r14}
+ /*------------ Save Context. ----------- */
+ mrs r3, control
+ mrs r0, psp
+ isb
- /* Save the new top of stack into the first member of the TCB. */
- str r0, [r2]
+ add r0, r0, #0x20 /* Move r0 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r1!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r0, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */
+ sub r0, r0, #0x20 /* Set r0 back to the location of hardware saved context. */
- stmdb sp!, {r0, r3}
- mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
- #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- #endif
- msr basepri, r0
- dsb
- isb
- #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- #endif
- bl vTaskSwitchContext
- mov r0, #0
- msr basepri, r0
- ldmia sp!, {r0, r3}
+ stmia r1!, {r3-r11, lr} /* Store CONTROL register, r4-r11 and LR. */
+ ldmia r0, {r4-r11} /* Copy hardware saved context into r4-r11. */
+ stmia r1!, {r0, r4-r11} /* Store original PSP (after hardware has saved context) and the hardware saved context. */
+ str r1, [r2] /* Save the location from where the context should be restored as the first member of TCB. */
- /* The first item in pxCurrentTCB is the task top of stack. */
- ldr r1, [r3]
- ldr r0, [r1]
- /* Move onto the second item in the TCB... */
- add r1, r1, #4
+ /*---------- Select next task. --------- */
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
+ cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+#endif
+ msr basepri, r0
+ dsb
+ isb
+#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
+ cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+#endif
+ bl vTaskSwitchContext
+ mov r0, #0
+ msr basepri, r0
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- bic r3, r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- str r3, [r2] /* Disable MPU. */
+ /*------------ Program MPU. ------------ */
+ ldr r3, =pxCurrentTCB
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */
- /* Region Base Address register. */
- ldr r2, =0xe000ed9c
- /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
- ldmia r1!, {r4-r11}
- /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- stmia r2, {r4-r11}
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ bic r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ str r3, [r0] /* Disable MPU. */
- #ifdef configTOTAL_MPU_REGIONS
- #if ( configTOTAL_MPU_REGIONS == 16 )
- /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
- ldmia r1!, {r4-r11}
- /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
- stmia r2, {r4-r11}
- /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
- ldmia r1!, {r4-r11}
- /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
- stmia r2, {r4-r11}
- #endif /* configTOTAL_MPU_REGIONS == 16. */
- #endif /* configTOTAL_MPU_REGIONS */
+ ldr r0, =0xe000ed9c /* Region Base Address register. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- str r3, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
+#ifdef configTOTAL_MPU_REGIONS
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+ #endif /* configTOTAL_MPU_REGIONS == 16. */
+#endif
- /* Pop the registers that are not automatically saved on exception entry. */
- ldmia r0!, {r3-r11, r14}
- msr control, r3
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ orr r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ str r3, [r0] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
- /* Is the task using the FPU context? If so, pop the high vfp registers
- too. */
- tst r14, #0x10
- it eq
- vldmiaeq r0!, {s16-s31}
+ /*---------- Restore Context. ---------- */
+ ldr r3, =pxCurrentTCB
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ ldr r1, [r2] /* r1 = Location of saved context in TCB. */
- msr psp, r0
- isb
+ ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ msr psp, r0
+ stmia r0!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */
+ msr control, r3
- bx r14
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r0!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */
+ str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
/*-----------------------------------------------------------*/
-vPortSVCHandler:
- #ifndef USE_PROCESS_STACK /* Code should not be required if a main() is using the process stack. */
- tst lr, #4
- ite eq
- mrseq r0, msp
- mrsne r0, psp
- #else
- mrs r0, psp
- #endif
- b vPortSVCHandler_C
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+vPortSVCHandler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #NUM_SYSTEM_CALLS
+ blt syscall_enter
+ cmp r2, #portSVC_SYSTEM_CALL_EXIT
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+vPortSVCHandler:
+ #ifndef USE_PROCESS_STACK
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+ #else
+ mrs r0, psp
+ #endif
+ b vPortSVCHandler_C
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
vPortStartFirstTask:
- /* Use the NVIC offset register to locate the stack. */
- ldr r0, =0xE000ED08
- ldr r0, [r0]
- ldr r0, [r0]
- /* Set the msp back to the start of the stack. */
- msr msp, r0
- /* Clear the bit that indicates the FPU is in use in case the FPU was used
- before the scheduler was started - which would otherwise result in the
- unnecessary leaving of space in the SVC stack for lazy saving of FPU
- registers. */
- mov r0, #0
- msr control, r0
- /* Call SVC to start the first task. */
- cpsie i
- cpsie f
- dsb
- isb
- svc 0
+ /* Use the NVIC offset register to locate the stack. */
+ ldr r0, =0xE000ED08
+ ldr r0, [r0]
+ ldr r0, [r0]
+ /* Set the msp back to the start of the stack. */
+ msr msp, r0
+ /* Clear the bit that indicates the FPU is in use in case the FPU was used
+ before the scheduler was started - which would otherwise result in the
+ unnecessary leaving of space in the SVC stack for lazy saving of FPU
+ registers. */
+ mov r0, #0
+ msr control, r0
+ /* Call SVC to start the first task. */
+ cpsie i
+ cpsie f
+ dsb
+ isb
+ svc #portSVC_START_SCHEDULER
/*-----------------------------------------------------------*/
vPortRestoreContextOfFirstTask:
- /* Use the NVIC offset register to locate the stack. */
- ldr r0, =0xE000ED08
- ldr r0, [r0]
- ldr r0, [r0]
- /* Set the msp back to the start of the stack. */
- msr msp, r0
- /* Restore the context. */
- ldr r3, =pxCurrentTCB
- ldr r1, [r3]
- /* The first item in the TCB is the task top of stack. */
- ldr r0, [r1]
- /* Move onto the second item in the TCB... */
- add r1, r1, #4
+ ldr r0, =0xE000ED08 /* Use the NVIC offset register to locate the stack. */
+ ldr r0, [r0]
+ ldr r0, [r0]
+ msr msp, r0 /* Set the msp back to the start of the stack. */
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- bic r3, r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- str r3, [r2] /* Disable MPU. */
+ /*------------ Program MPU. ------------ */
+ ldr r3, =pxCurrentTCB
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */
- /* Region Base Address register. */
- ldr r2, =0xe000ed9c
- /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
- ldmia r1!, {r4-r11}
- /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- stmia r2, {r4-r11}
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ bic r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ str r3, [r0] /* Disable MPU. */
- #ifdef configTOTAL_MPU_REGIONS
- #if ( configTOTAL_MPU_REGIONS == 16 )
- /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
- ldmia r1!, {r4-r11}
- /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
- stmia r2, {r4-r11}
- /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
- ldmia r1!, {r4-r11}
- /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
- stmia r2, {r4-r11}
- #endif /* configTOTAL_MPU_REGIONS == 16. */
- #endif /* configTOTAL_MPU_REGIONS */
+ ldr r0, =0xe000ed9c /* Region Base Address register. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- str r3, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
+#ifdef configTOTAL_MPU_REGIONS
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+ #endif /* configTOTAL_MPU_REGIONS == 16. */
+#endif
- /* Pop the registers that are not automatically saved on exception entry. */
- ldmia r0!, {r3-r11, r14}
- msr control, r3
- /* Restore the task stack pointer. */
- msr psp, r0
- mov r0, #0
- msr basepri, r0
- bx r14
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ orr r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ str r3, [r0] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ /*---------- Restore Context. ---------- */
+ ldr r3, =pxCurrentTCB
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ ldr r1, [r2] /* r1 = Location of saved context in TCB. */
+
+ ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ msr psp, r0
+ stmia r0, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */
+ msr control, r3
+ str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */
+
+ mov r0, #0
+ msr basepri, r0
+ bx lr
/*-----------------------------------------------------------*/
vPortEnableVFP:
- /* The FPU enable bits are in the CPACR. */
- ldr.w r0, =0xE000ED88
- ldr r1, [r0]
+ /* The FPU enable bits are in the CPACR. */
+ ldr.w r0, =0xE000ED88
+ ldr r1, [r0]
- /* Enable CP10 and CP11 coprocessors, then save back. */
- orr r1, r1, #( 0xf << 20 )
- str r1, [r0]
- bx r14
+ /* Enable CP10 and CP11 coprocessors, then save back. */
+ orr r1, r1, #( 0xf << 20 )
+ str r1, [r0]
+ bx r14
/*-----------------------------------------------------------*/
xIsPrivileged:
- mrs r0, control /* r0 = CONTROL. */
- tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
- ite ne
- movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
- moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
- bx lr /* Return. */
+ mrs r0, control /* r0 = CONTROL. */
+ tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+ ite ne
+ movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ bx lr /* Return. */
/*-----------------------------------------------------------*/
vResetPrivilege:
- mrs r0, control /* r0 = CONTROL. */
- orr r0, r0, #1 /* r0 = r0 | 1. */
- msr control, r0 /* CONTROL = r0. */
- bx lr /* Return to the caller. */
+ mrs r0, control /* r0 = CONTROL. */
+ orr r0, r0, #1 /* r0 = r0 | 1. */
+ msr control, r0 /* CONTROL = r0. */
+ bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
- END
+ END
diff --git a/Source/portable/IAR/ARM_CM4_MPU/portmacro.h b/Source/portable/IAR/ARM_CM4_MPU/portmacro.h
index cf02898..cae9bcc 100644
--- a/Source/portable/IAR/ARM_CM4_MPU/portmacro.h
+++ b/Source/portable/IAR/ARM_CM4_MPU/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -62,16 +62,18 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
-#if ( configUSE_16_BIT_TICKS == 1 )
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
-#else
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
@@ -193,9 +195,51 @@
uint32_t ulRegionAttribute;
} xMPU_REGION_REGISTERS;
+typedef struct MPU_REGION_SETTINGS
+{
+ uint32_t ulRegionStartAddress;
+ uint32_t ulRegionEndAddress;
+ uint32_t ulRegionPermissions;
+} xMPU_REGION_SETTINGS;
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+#define MAX_CONTEXT_SIZE ( 52 )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+#define portACL_ENTRY_SIZE_BITS ( 32U )
+
+/* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+#define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+#define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
typedef struct MPU_SETTINGS
{
xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
} xMPU_SETTINGS;
/* Architecture specifics. */
@@ -205,13 +249,14 @@
/*-----------------------------------------------------------*/
/* SVC numbers for various services. */
-#define portSVC_START_SCHEDULER 0
-#define portSVC_YIELD 1
-#define portSVC_RAISE_PRIVILEGE 2
+#define portSVC_START_SCHEDULER 100
+#define portSVC_YIELD 101
+#define portSVC_RAISE_PRIVILEGE 102
+#define portSVC_SYSTEM_CALL_EXIT 103
/* Scheduler utilities. */
-#define portYIELD() __asm volatile ( " SVC %0 \n"::"i" ( portSVC_YIELD ) : "memory" )
+#define portYIELD() __asm volatile ( " SVC %0 \n"::"i" ( portSVC_YIELD ) : "memory" )
#define portYIELD_WITHIN_API() \
{ \
/* Set a PendSV to request a context switch. */ \
@@ -346,6 +391,16 @@
#define portRESET_PRIVILEGE() vResetPrivilege()
/*-----------------------------------------------------------*/
+extern BaseType_t xPortIsTaskPrivileged( void );
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+#define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+/*-----------------------------------------------------------*/
+
#ifndef configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY
#warning "configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY is not defined. We recommend defining it to 1 in FreeRTOSConfig.h for better security. https://www.FreeRTOS.org/FreeRTOS-V10.3.x.html"
#define configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY 0
diff --git a/Source/portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S b/Source/portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..ef180bd
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1336 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+#include "mpu_syscall_numbers.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskDelayUntil
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskAbortDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskPriorityGet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_eTaskGetState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskGetInfo
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSuspend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskResume
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetTickCount
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTaskGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetSystemState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetSchedulerState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetTimeOutState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskCheckForTimeOut
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyEntry
+MPU_xTaskGenericNotifyEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotify
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWaitEntry
+MPU_xTaskGenericNotifyWaitEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyWait
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGenericSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueMessagesWaiting
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueuePeek
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSemaphoreTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGetMutexHolder
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueTakeMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGiveMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSelectFromSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueAddToSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueAddToRegistry
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueUnregisterQueue
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcQueueGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTimerGetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerIsTimerActive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommandEntry
+MPU_xTimerGenericCommandEntry:
+ push {r0}
+ /* This function can be called from ISR also and therefore, we need a check
+ * to take privileged path, if called from ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGenericCommand
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandPrivImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTimerGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetPeriod
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetExpiryTime
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBitsEntry
+MPU_xEventGroupWaitBitsEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupWaitBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupClearBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSetBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSync
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxEventGroupGetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vEventGroupSetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsFull
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsEmpty
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferBytesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl: ; Weak stub: deliberate infinite loop that traps calls when the real wrapper is compiled out.
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandPrivImpl
+MPU_xTimerGenericCommandPrivImpl:
+ b MPU_xTimerGenericCommandPrivImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/Source/portable/IAR/ARM_CM55/non_secure/port.c b/Source/portable/IAR/ARM_CM55/non_secure/port.c
new file mode 100644
index 0000000..9712ac3
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/non_secure/port.c
@@ -0,0 +1,2043 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+/* MPU includes. */
+#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
+
+/* Portasm includes. */
+#include "portasm.h"
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ /* Secure components includes. */
+ #include "secure_context.h"
+ #include "secure_init.h"
+#endif /* configENABLE_TRUSTZONE */
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/**
+ * The FreeRTOS Cortex-M55 port can be configured to run on the Secure Side only
+ * i.e. the processor boots as secure and never jumps to the non-secure side.
+ * The Trust Zone support in the port must be disabled in order to run FreeRTOS
+ * on the secure side. The following are the valid configuration settings:
+ *
+ * 1. Run FreeRTOS on the Secure Side:
+ * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
+ *
+ * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
+ *
+ * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
+ */
+#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
+ #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the NVIC.
+ */
+#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
+#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
+#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
+#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
+#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
+#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
+#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
+#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
+#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
+#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
+#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 )
+#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
+#define portCPACR_CP10_VALUE ( 3UL )
+#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE
+#define portCPACR_CP10_POS ( 20UL )
+#define portCPACR_CP11_POS ( 22UL )
+
+#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define portFPCCR_ASPEN_POS ( 31UL )
+#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS )
+#define portFPCCR_LSPEN_POS ( 30UL )
+#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the MPU.
+ */
+#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
+#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
+#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
+
+#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
+#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
+
+#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) )
+#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) )
+
+#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) )
+#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) )
+
+#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) )
+#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) )
+
+#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) )
+#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) )
+
+#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
+#define portMPU_MAIR_ATTR0_POS ( 0UL )
+#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR1_POS ( 8UL )
+#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR2_POS ( 16UL )
+#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR3_POS ( 24UL )
+#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 )
+
+#define portMPU_MAIR_ATTR4_POS ( 0UL )
+#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR5_POS ( 8UL )
+#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR6_POS ( 16UL )
+#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR7_POS ( 24UL )
+#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 )
+
+#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL )
+
+#define portMPU_RLAR_REGION_ENABLE ( 1UL )
+
+/* Enable privileged access to unmapped region. */
+#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL )
+
+/* Enable MPU. */
+#define portMPU_ENABLE_BIT ( 1UL << 0UL )
+
+/* Expected value of the portMPU_TYPE register. */
+#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? Both macro
+ * parameters are fully parenthesized so compound expressions associate correctly. */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The maximum 24-bit number.
+ *
+ * It is needed because the systick is a 24-bit counter.
+ */
+#define portMAX_24_BIT_NUMBER ( 0xffffffUL )
+
+/**
+ * @brief A fiddle factor to estimate the number of SysTick counts that would
+ * have occurred while the SysTick counter is stopped during tickless idle
+ * calculations.
+ */
+#define portMISSED_COUNTS_FACTOR ( 94UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to set up the initial stack.
+ */
+#define portINITIAL_XPSR ( 0x01000000 )
+
+#if ( configRUN_FREERTOS_SECURE_ONLY == 1 )
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF FD
+ * 1111 1111 1111 1111 1111 1111 1111 1101
+ *
+ * Bit[6] - 1 --> The exception was taken from the Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 1 --> The exception was taken to the Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xfffffffd )
+#else
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF BC
+ * 1111 1111 1111 1111 1111 1111 1011 1100
+ *
+ * Bit[6] - 0 --> The exception was taken from the Non-Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 0 --> The exception was taken to the Non-Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xffffffbc )
+#endif /* configRUN_FREERTOS_SECURE_ONLY */
+
+/**
+ * @brief CONTROL register privileged bit mask.
+ *
+ * Bit[0] in CONTROL register tells the privilege:
+ * Bit[0] = 0 ==> The task is privileged.
+ * Bit[0] = 1 ==> The task is not privileged.
+ */
+#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL )
+
+/**
+ * @brief Initial CONTROL register values.
+ */
+#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 )
+#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 )
+
+/**
+ * @brief Let the user override the default SysTick clock rate. If defined by the
+ * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
+ * configuration register.
+ */
+#ifndef configSYSTICK_CLOCK_HZ
+ #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ )
+ /* Ensure the SysTick is clocked at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT )
+#else
+ /* Select the option to clock SysTick not at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 )
+#endif
+
+/**
+ * @brief Let the user override the pre-loading of the initial LR with the
+ * address of prvTaskExitError() in case it messes up unwinding of the stack
+ * in the debugger.
+ */
+#ifdef configTASK_RETURN_ADDRESS
+ #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
+#else
+ #define portTASK_RETURN_ADDRESS prvTaskExitError
+#endif
+
+/**
+ * @brief If portPRELOAD_REGISTERS then registers will be given an initial value
+ * when a task is created. This helps in debugging at the cost of code size.
+ */
+#define portPRELOAD_REGISTERS 1
+
+/**
+ * @brief A task is created without a secure context, and must call
+ * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes
+ * any secure calls.
+ */
+#define portNO_SECURE_CONTEXT 0
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Used to catch tasks that attempt to return from their implementing
+ * function.
+ */
+static void prvTaskExitError( void );
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Setup the Memory Protection Unit (MPU).
+ */
+ static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_FPU == 1 )
+
+/**
+ * @brief Setup the Floating Point Unit (FPU).
+ */
+ static void prvSetupFPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_FPU */
+
+/**
+ * @brief Setup the timer to generate the tick interrupts.
+ *
+ * The implementation in this file is weak to allow application writers to
+ * change the timer used to generate the tick interrupt.
+ */
+void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether the current execution context is interrupt.
+ *
+ * @return pdTRUE if the current execution context is interrupt, pdFALSE
+ * otherwise.
+ */
+BaseType_t xPortIsInsideInterrupt( void );
+
+/**
+ * @brief Yield the processor.
+ */
+void vPortYield( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enter critical section.
+ */
+void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Exit from critical section.
+ */
+void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SysTick handler.
+ */
+void SysTick_Handler( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief C part of SVC handler.
+ */
+portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
+/**
+ * @brief Each task maintains its own interrupt status in the critical nesting
+ * variable.
+ */
+PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Saved as part of the task context to indicate which context the
+ * task is using on the secure side.
+ */
+ PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
+#endif /* configENABLE_TRUSTZONE */
+
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+
+/**
+ * @brief The number of SysTick increments that make up one tick period.
+ */
+ PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;
+
+/**
+ * @brief The maximum number of tick periods that can be suppressed is
+ * limited by the 24 bit resolution of the SysTick timer.
+ */
+ PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;
+
+/**
+ * @brief Compensate for the CPU cycles that pass while the SysTick is
+ * stopped (low power functionality only).
+ */
+ PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+ __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
+ {
+ uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
+ TickType_t xModifiableIdleTime;
+
+ /* Make sure the SysTick reload value does not overflow the counter. */
+ if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
+ {
+ xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
+ }
+
+ /* Enter a critical section but don't use the taskENTER_CRITICAL()
+ * method as that will mask interrupts that should exit sleep mode. */
+ __asm volatile ( "cpsid i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* If a context switch is pending or a task is waiting for the scheduler
+ * to be unsuspended then abandon the low power entry. */
+ if( eTaskConfirmSleepModeStatus() == eAbortSleep )
+ {
+ /* Re-enable interrupts - see comments above the cpsid instruction
+ * above. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ }
+ else
+ {
+ /* Stop the SysTick momentarily. The time the SysTick is stopped for
+ * is accounted for as best it can be, but using the tickless mode will
+ * inevitably result in some tiny drift of the time maintained by the
+ * kernel with respect to calendar time. */
+ portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+ /* Use the SysTick current-value register to determine the number of
+ * SysTick decrements remaining until the next tick interrupt. If the
+ * current-value register is zero, then there are actually
+ * ulTimerCountsForOneTick decrements remaining, not zero, because the
+ * SysTick requests the interrupt when decrementing from 1 to 0. */
+ ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+
+ if( ulSysTickDecrementsLeft == 0 )
+ {
+ ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
+ }
+
+ /* Calculate the reload value required to wait xExpectedIdleTime
+ * tick periods. -1 is used because this code normally executes part
+ * way through the first tick period. But if the SysTick IRQ is now
+ * pending, then clear the IRQ, suppressing the first tick, and correct
+ * the reload value to reflect that the second tick period is already
+ * underway. The expected idle time is always at least two ticks. */
+ ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
+
+ if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
+ {
+ portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
+ ulReloadValue -= ulTimerCountsForOneTick;
+ }
+
+ if( ulReloadValue > ulStoppedTimerCompensation )
+ {
+ ulReloadValue -= ulStoppedTimerCompensation;
+ }
+
+ /* Set the new reload value. */
+ portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
+
+ /* Clear the SysTick count flag and set the count value back to
+ * zero. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+ /* Restart SysTick. */
+ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
+
+ /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
+ * set its parameter to 0 to indicate that its implementation contains
+ * its own wait for interrupt or wait for event instruction, and so wfi
+ * should not be executed again. However, the original expected idle
+ * time variable must remain unmodified, so a copy is taken. */
+ xModifiableIdleTime = xExpectedIdleTime;
+ configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
+
+ if( xModifiableIdleTime > 0 )
+ {
+ __asm volatile ( "dsb" ::: "memory" );
+ __asm volatile ( "wfi" );
+ __asm volatile ( "isb" );
+ }
+
+ configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
+
+ /* Re-enable interrupts to allow the interrupt that brought the MCU
+ * out of sleep mode to execute immediately. See comments above
+ * the cpsid instruction above. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* Disable interrupts again because the clock is about to be stopped
+ * and interrupts that execute while the clock is stopped will increase
+ * any slippage between the time maintained by the RTOS and calendar
+ * time. */
+ __asm volatile ( "cpsid i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* Disable the SysTick clock without reading the
+ * portNVIC_SYSTICK_CTRL_REG register to ensure the
+ * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
+ * the time the SysTick is stopped for is accounted for as best it can
+ * be, but using the tickless mode will inevitably result in some tiny
+ * drift of the time maintained by the kernel with respect to calendar
+ * time*/
+ portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+ /* Determine whether the SysTick has already counted to zero. */
+ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+ {
+ uint32_t ulCalculatedLoadValue;
+
+ /* The tick interrupt ended the sleep (or is now pending), and
+ * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG
+ * with whatever remains of the new tick period. */
+ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
+
+ /* Don't allow a tiny value, or values that have somehow
+ * underflowed because the post sleep hook did something
+ * that took too long or because the SysTick current-value register
+ * is zero. */
+ if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
+ {
+ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
+ }
+
+ portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
+
+ /* As the pending tick will be processed as soon as this
+ * function exits, the tick value maintained by the tick is stepped
+ * forward by one less than the time spent waiting. */
+ ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
+ }
+ else
+ {
+ /* Something other than the tick interrupt ended the sleep. */
+
+ /* Use the SysTick current-value register to determine the
+ * number of SysTick decrements remaining until the expected idle
+ * time would have ended. */
+ ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+ #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
+ {
+ /* If the SysTick is not using the core clock, the current-
+ * value register might still be zero here. In that case, the
+ * SysTick didn't load from the reload register, and there are
+ * ulReloadValue decrements remaining in the expected idle
+ * time, not zero. */
+ if( ulSysTickDecrementsLeft == 0 )
+ {
+ ulSysTickDecrementsLeft = ulReloadValue;
+ }
+ }
+ #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+ /* Work out how long the sleep lasted rounded to complete tick
+ * periods (not the ulReload value which accounted for part
+ * ticks). */
+ ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
+
+ /* How many complete tick periods passed while the processor
+ * was waiting? */
+ ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
+
+ /* The reload value is set to whatever fraction of a single tick
+ * period remains. */
+ portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
+ }
+
+ /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
+ * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If
+ * the SysTick is not using the core clock, temporarily configure it to
+ * use the core clock. This configuration forces the SysTick to load
+ * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
+ * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready
+ * to receive the standard value immediately. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+ #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
+ {
+ portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+ }
+ #else
+ {
+ /* The temporary usage of the core clock has served its purpose,
+ * as described above. Resume usage of the other clock. */
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;
+
+ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+ {
+ /* The partial tick period already ended. Be sure the SysTick
+ * counts it only once. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
+ }
+
+ portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+ }
+ #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+ /* Step the tick to account for any tick periods that elapsed. */
+ vTaskStepTick( ulCompleteTickPeriods );
+
+ /* Exit with interrupts enabled. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ }
+ }
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Configure the SysTick timer to generate the RTOS tick interrupt at
+     * configTICK_RATE_HZ. Defined weak so an application can override it to
+     * generate the tick from a different timer. */
+
+    /* Calculate the constants required to configure the tick interrupt. */
+    #if ( configUSE_TICKLESS_IDLE == 1 )
+    {
+        /* SysTick clock cycles per RTOS tick period. */
+        ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
+        /* SysTick is a 24-bit counter, which bounds how many whole tick
+         * periods can be suppressed during one low power sleep. */
+        xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
+        /* Compensation, in SysTick cycles, for the time the timer is
+         * stopped while entering/leaving tickless idle. */
+        ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
+    }
+    #endif /* configUSE_TICKLESS_IDLE */
+
+    /* Stop and reset the SysTick before reprogramming it. */
+    portNVIC_SYSTICK_CTRL_REG = 0UL;
+    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+    /* Configure SysTick to interrupt at the requested rate. The reload
+     * value is ( cycles-per-tick - 1 ) because the counter counts the zero
+     * value as one cycle. */
+    portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
+    portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+}
+/*-----------------------------------------------------------*/
+
+static void prvTaskExitError( void )
+{
+    /* Guard variable for the loop below. Because it is volatile the
+     * compiler must assume the loop can terminate, which suppresses
+     * 'unreachable code' warnings for code placed after calls to this
+     * function. This function is also referenced after the scheduler has
+     * been started purely to avoid a 'defined but never called' warning. */
+    volatile uint32_t ulLoopGuard = 0UL;
+
+    /* A function that implements a task must not exit or attempt to return
+     * to its caller as there is nothing to return to. A task that wants to
+     * end itself should call vTaskDelete( NULL ) instead. Artificially
+     * trigger an assert if configASSERT() is defined, then spin here so
+     * application writers can catch the error in a debugger. */
+    configASSERT( ulCriticalNesting == ~0UL );
+    portDISABLE_INTERRUPTS();
+
+    for( ; ulLoopGuard == 0UL ; )
+    {
+        /* Spin forever. */
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+    static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Translate the access-permission field of an MPU RBAR register
+         * value into the corresponding task-level tskMPU_* permission
+         * flags. Encodings other than read-only and read-write yield no
+         * permissions. */
+        uint32_t ulAccessPermissions;
+        uint32_t ulPermissionField = ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK );
+
+        if( ulPermissionField == portMPU_REGION_READ_ONLY )
+        {
+            ulAccessPermissions = tskMPU_READ_PERMISSION;
+        }
+        else if( ulPermissionField == portMPU_REGION_READ_WRITE )
+        {
+            ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+        }
+        else
+        {
+            ulAccessPermissions = 0;
+        }
+
+        return ulAccessPermissions;
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+    static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Program the fixed kernel MPU regions - privileged flash,
+         * unprivileged flash, the system call (syscalls) flash section and
+         * the privileged SRAM holding kernel data - then enable the MPU.
+         * The region boundaries come from linker defined symbols. */
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_functions_start__;
+            extern uint32_t * __privileged_functions_end__;
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+            extern uint32_t * __unprivileged_flash_start__;
+            extern uint32_t * __unprivileged_flash_end__;
+            extern uint32_t * __privileged_sram_start__;
+            extern uint32_t * __privileged_sram_end__;
+        #else /* if defined( __ARMCC_VERSION ) */
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __privileged_functions_start__[];
+            extern uint32_t __privileged_functions_end__[];
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+            extern uint32_t __unprivileged_flash_start__[];
+            extern uint32_t __unprivileged_flash_end__[];
+            extern uint32_t __privileged_sram_start__[];
+            extern uint32_t __privileged_sram_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+
+        /* The only permitted number of regions are 8 or 16. */
+        configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) );
+
+        /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */
+        configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );
+
+        /* Check that the MPU is present. */
+        if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
+        {
+            /* MAIR0 - Index 0. All regions below reference this attribute
+             * index (portMPU_RLAR_ATTR_INDEX0). */
+            portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+            /* MAIR0 - Index 1. Device memory attribute, available for use
+             * by other regions. */
+            portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+            /* Setup privileged flash as Read Only so that privileged tasks can
+             * read it but not modify. */
+            portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_PRIVILEGED_READ_ONLY );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Setup unprivileged flash as Read Only by both privileged and
+             * unprivileged tasks. All tasks can read it but no-one can modify. */
+            portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_READ_ONLY );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Setup unprivileged syscalls flash as Read Only by both privileged
+             * and unprivileged tasks. All tasks can read it but no-one can modify. */
+            portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_READ_ONLY );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Setup RAM containing kernel data for privileged access only.
+             * Marked execute-never so data can never be executed. */
+            portMPU_RNR_REG = portPRIVILEGED_RAM_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
+                               ( portMPU_REGION_EXECUTE_NEVER );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Enable mem fault. */
+            portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT;
+
+            /* Enable MPU with privileged background access i.e. unmapped
+             * regions have privileged access. */
+            portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
+        }
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_FPU == 1 )
+    static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Grant full (privileged and unprivileged) access to the FPU and
+         * configure automatic, lazy stacking of the floating point context
+         * on exception entry. */
+        #if ( configENABLE_TRUSTZONE == 1 )
+        {
+            /* Enable non-secure access to the FPU. */
+            SecureInit_EnableNSFPUAccess();
+        }
+        #endif /* configENABLE_TRUSTZONE */
+
+        /* CP10 = 11 ==> Full access to FPU i.e. both privileged and
+         * unprivileged code should be able to access FPU. CP11 should be
+         * programmed to the same value as CP10. */
+        *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) |
+                            ( portCPACR_CP11_VALUE << portCPACR_CP11_POS )
+                            );
+
+        /* ASPEN = 1 ==> Hardware should automatically preserve floating point
+         * context on exception entry and restore on exception return.
+         * LSPEN = 1 ==> Enable lazy context save of FP state. */
+        *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK );
+    }
+#endif /* configENABLE_FPU */
+/*-----------------------------------------------------------*/
+
+void vPortYield( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Request a context switch by pending the PendSV exception; the actual
+     * switch is performed in the PendSV handler. */
+    portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+
+    /* Barriers are normally not required but do ensure the code is
+     * completely within the specified behaviour for the architecture. */
+    __asm volatile ( "dsb" ::: "memory" );
+    __asm volatile ( "isb" );
+}
+/*-----------------------------------------------------------*/
+
+void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Enter a critical section. Interrupts are masked first, then a nesting
+     * count is incremented so critical sections can nest; interrupts are
+     * only re-enabled when the count returns to zero in
+     * vPortExitCritical(). */
+    portDISABLE_INTERRUPTS();
+    ulCriticalNesting++;
+
+    /* Barriers are normally not required but do ensure the code is
+     * completely within the specified behaviour for the architecture. */
+    __asm volatile ( "dsb" ::: "memory" );
+    __asm volatile ( "isb" );
+}
+/*-----------------------------------------------------------*/
+
+void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Leave a critical section previously entered with
+     * vPortEnterCritical(). The nesting count must be non-zero on entry;
+     * interrupts are re-enabled only when the outermost critical section
+     * is exited. */
+    configASSERT( ulCriticalNesting );
+
+    if( --ulCriticalNesting == 0 )
+    {
+        portENABLE_INTERRUPTS();
+    }
+}
+/*-----------------------------------------------------------*/
+
+void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* SysTick interrupt handler - advances the RTOS tick count and pends a
+     * context switch when the kernel reports one is required. */
+    uint32_t ulPreviousMask;
+
+    /* Mask interrupts while the tick count is incremented; the previous
+     * mask is restored afterwards. */
+    ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();
+    {
+        /* Increment the RTOS tick. A non-pdFALSE return means a task of
+         * equal or higher priority was unblocked. */
+        if( xTaskIncrementTick() != pdFALSE )
+        {
+            /* Pend a context switch. */
+            portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+        }
+    }
+    portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
+}
+/*-----------------------------------------------------------*/
+
+void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
+{
+    /* C portion of the SVC handler. pulCallerStackAddress points at the
+     * exception stack frame of the caller. The handler decodes the SVC
+     * number from the SVC instruction that raised the exception and
+     * dispatches on it: secure context allocate/free (TrustZone builds),
+     * starting the scheduler, and raising privilege (v1 MPU wrappers). */
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+    #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+    uint32_t ulPC;
+
+    #if ( configENABLE_TRUSTZONE == 1 )
+        uint32_t ulR0, ulR1;
+        extern TaskHandle_t pxCurrentTCB;
+        #if ( configENABLE_MPU == 1 )
+            uint32_t ulControl, ulIsTaskPrivileged;
+        #endif /* configENABLE_MPU */
+    #endif /* configENABLE_TRUSTZONE */
+    uint8_t ucSVCNumber;
+
+    /* Registers are stored on the stack in the following order - R0, R1, R2, R3,
+     * R12, LR, PC, xPSR. */
+    ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
+    /* The SVC number is the immediate operand encoded in the SVC
+     * instruction itself, located two bytes before the stacked return
+     * address. */
+    ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
+
+    switch( ucSVCNumber )
+    {
+        #if ( configENABLE_TRUSTZONE == 1 )
+            case portSVC_ALLOCATE_SECURE_CONTEXT:
+
+                /* R0 contains the stack size passed as parameter to the
+                 * vPortAllocateSecureContext function. */
+                ulR0 = pulCallerStackAddress[ 0 ];
+
+                #if ( configENABLE_MPU == 1 )
+                {
+                    /* Read the CONTROL register value. */
+                    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
+
+                    /* The task that raised the SVC is privileged if Bit[0]
+                     * in the CONTROL register is 0. */
+                    ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 );
+
+                    /* Allocate and load a context for the secure task. */
+                    xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB );
+                }
+                #else /* if ( configENABLE_MPU == 1 ) */
+                {
+                    /* Allocate and load a context for the secure task. */
+                    xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB );
+                }
+                #endif /* configENABLE_MPU */
+
+                configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID );
+                SecureContext_LoadContext( xSecureContext, pxCurrentTCB );
+                break;
+
+            case portSVC_FREE_SECURE_CONTEXT:
+
+                /* R0 contains TCB being freed and R1 contains the secure
+                 * context handle to be freed. */
+                ulR0 = pulCallerStackAddress[ 0 ];
+                ulR1 = pulCallerStackAddress[ 1 ];
+
+                /* Free the secure context. */
+                SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 );
+                break;
+        #endif /* configENABLE_TRUSTZONE */
+
+        case portSVC_START_SCHEDULER:
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                /* De-prioritize the non-secure exceptions so that the
+                 * non-secure pendSV runs at the lowest priority. */
+                SecureInit_DePrioritizeNSExceptions();
+
+                /* Initialize the secure context management system. */
+                SecureContext_Init();
+            }
+            #endif /* configENABLE_TRUSTZONE */
+
+            #if ( configENABLE_FPU == 1 )
+            {
+                /* Setup the Floating Point Unit (FPU). */
+                prvSetupFPU();
+            }
+            #endif /* configENABLE_FPU */
+
+            /* Setup the context of the first task so that the first task starts
+             * executing. */
+            vRestoreContextOfFirstTask();
+            break;
+
+        #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+            case portSVC_RAISE_PRIVILEGE:
+
+                /* Only raise the privilege, if the svc was raised from any of
+                 * the system calls. */
+                if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+                    ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+                {
+                    vRaisePrivilege();
+                }
+                break;
+        #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+        default:
+            /* Incorrect SVC call. */
+            configASSERT( pdFALSE );
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    void vSystemCallEnter( uint32_t * pulTaskStack,
+                           uint32_t ulLR,
+                           uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Enter a system call: after validating the request, switch the
+         * thread from the task stack to the task's dedicated system call
+         * stack, copy the exception stack frame across, point the stacked
+         * PC at the kernel-side implementation of the requested API,
+         * arrange for vRequestSystemCallExit() to run when it returns, and
+         * raise privilege for the duration of the call. */
+        extern TaskHandle_t pxCurrentTCB;
+        extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+        xMPU_SETTINGS * pxMpuSettings;
+        uint32_t * pulSystemCallStack;
+        uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+        #endif /* #if defined( __ARMCC_VERSION ) */
+
+        ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+        /* Checks:
+         * 1. SVC is raised from the system call section (i.e. application is
+         *    not raising SVC directly).
+         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+         *    it is non-NULL only during the execution of a system call (i.e.
+         *    between system call enter and exit).
+         * 3. System call is not for a kernel API disabled by the configuration
+         *    in FreeRTOSConfig.h.
+         * 4. We do not need to check that ucSystemCallNumber is within range
+         *    because the assembly SVC handler checks that before calling
+         *    this function.
+         */
+        if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+            ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+            ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+        {
+            pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+            /* Determine the exception stack frame size, in 32-bit words,
+             * from the EXC_RETURN value in LR. */
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+            {
+                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+                {
+                    /* Extended frame i.e. FPU in use. */
+                    ulStackFrameSize = 26;
+                    __asm volatile
+                    (
+                        " vpush {s0} \n" /* Trigger lazy stacking. */
+                        " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+                        ::: "memory"
+                    );
+                }
+                else
+                {
+                    /* Standard frame i.e. FPU not in use. */
+                    ulStackFrameSize = 8;
+                }
+            }
+            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+            {
+                ulStackFrameSize = 8;
+            }
+            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+            /* Make space on the system call stack for the stack frame. */
+            pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+            /* Copy the stack frame. */
+            for( i = 0; i < ulStackFrameSize; i++ )
+            {
+                pulSystemCallStack[ i ] = pulTaskStack[ i ];
+            }
+
+            /* Store the value of the Link Register before the SVC was raised.
+             * It contains the address of the caller of the System Call entry
+             * point (i.e. the caller of the MPU_<API>). We need to restore it
+             * when we exit from the system call. */
+            pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+            /* Store the value of the PSPLIM register before the SVC was raised.
+             * We need to restore it when we exit from the system call. */
+            __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+            /* Use the pulSystemCallStack in thread mode. */
+            __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+            /* Start executing the system call upon returning from this handler. */
+            pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+            /* Raise a request to exit from the system call upon finishing the
+             * system call. */
+            pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+            /* Remember the location where we should copy the stack frame when we exit from
+             * the system call. */
+            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+            {
+                pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+            }
+            else
+            {
+                pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+            }
+
+            /* We ensure in pxPortInitialiseStack that the system call stack is
+             * double word aligned and therefore, there is no need of padding.
+             * Clear the bit[9] of stacked xPSR. */
+            pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+            /* Raise the privilege for the duration of the system call. */
+            __asm volatile
+            (
+                " mrs r0, control \n" /* Obtain current control value. */
+                " movs r1, #1 \n" /* r1 = 1. */
+                " bics r0, r1 \n" /* Clear nPRIV bit. */
+                " msr control, r0 \n" /* Write back new control value. */
+                ::: "r0", "r1", "memory"
+            );
+        }
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+    {
+        /* Raise the SVC that requests an exit from a system call. This
+         * function's address is installed as the stacked LR by
+         * vSystemCallEnter(), so it runs when the kernel-side system call
+         * implementation returns; the resulting SVC is then handled by
+         * vSystemCallExit(). */
+        __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    void vSystemCallExit( uint32_t * pulSystemCallStack,
+                          uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Exit a system call: copy the exception stack frame back from the
+         * system call stack to the task stack, restore the stacked PC (and
+         * LR) to the caller of the MPU_<API> wrapper, restore PSPLIM and
+         * the xPSR stack-alignment padding bit, and drop privilege before
+         * returning to thread mode. */
+        extern TaskHandle_t pxCurrentTCB;
+        xMPU_SETTINGS * pxMpuSettings;
+        uint32_t * pulTaskStack;
+        uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_functions_start__;
+            extern uint32_t * __privileged_functions_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __privileged_functions_start__[];
+            extern uint32_t __privileged_functions_end__[];
+        #endif /* #if defined( __ARMCC_VERSION ) */
+
+        ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+        /* Checks:
+         * 1. SVC is raised from the privileged code (i.e. application is not
+         *    raising SVC directly). This SVC is only raised from
+         *    vRequestSystemCallExit which is in the privileged code section.
+         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+         *    this means that we previously entered a system call and the
+         *    application is not attempting to exit without entering a system
+         *    call.
+         */
+        if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+            ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+        {
+            pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+            /* Determine the exception stack frame size, in 32-bit words,
+             * from the EXC_RETURN value in LR. */
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+            {
+                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+                {
+                    /* Extended frame i.e. FPU in use. */
+                    ulStackFrameSize = 26;
+                    __asm volatile
+                    (
+                        " vpush {s0} \n" /* Trigger lazy stacking. */
+                        " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+                        ::: "memory"
+                    );
+                }
+                else
+                {
+                    /* Standard frame i.e. FPU not in use. */
+                    ulStackFrameSize = 8;
+                }
+            }
+            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+            {
+                ulStackFrameSize = 8;
+            }
+            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+            /* Make space on the task stack for the stack frame. */
+            pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+            /* Copy the stack frame. */
+            for( i = 0; i < ulStackFrameSize; i++ )
+            {
+                pulTaskStack[ i ] = pulSystemCallStack[ i ];
+            }
+
+            /* Use the pulTaskStack in thread mode. */
+            __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+            /* Return to the caller of the System Call entry point (i.e. the
+             * caller of the MPU_<API>). */
+            pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+            /* Ensure that LR has a valid value.*/
+            pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+            /* Restore the PSPLIM register to what it was at the time of
+             * system call entry. */
+            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+            /* If the hardware used padding to force the stack pointer
+             * to be double word aligned, set the stacked xPSR bit[9],
+             * otherwise clear it. */
+            if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+            }
+            else
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+            }
+
+            /* This is not NULL only for the duration of the system call. */
+            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+            /* Drop the privilege before returning to the thread mode. */
+            __asm volatile
+            (
+                " mrs r0, control \n" /* Obtain current control value. */
+                " movs r1, #1 \n" /* r1 = 1. */
+                " orrs r0, r1 \n" /* Set nPRIV bit. */
+                " msr control, r0 \n" /* Write back new control value. */
+                ::: "r0", "r1", "memory"
+            );
+        }
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Report whether the calling task was created as a privileged
+         * task, as recorded in the ulTaskFlags member of its MPU
+         * settings. */
+        const xMPU_SETTINGS * pxCallingTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+        BaseType_t xTaskIsPrivileged;
+
+        if( ( pxCallingTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+        {
+            xTaskIsPrivileged = pdTRUE;
+        }
+        else
+        {
+            xTaskIsPrivileged = pdFALSE;
+        }
+
+        return xTaskIsPrivileged;
+    }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters,
+                                         BaseType_t xRunPrivileged,
+                                         xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Build the initial context of a task in the ulContext array of its
+         * MPU settings (not on the task stack): callee-saved registers
+         * r4-r11, the 8-word hardware stack frame (r0-r3, r12, LR, PC,
+         * xPSR), the secure context handle when TrustZone is enabled, then
+         * PSP, PSPLIM, CONTROL and the EXC_RETURN value. Returns a pointer
+         * to one past the last context word written. */
+        uint32_t ulIndex = 0;
+
+        xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+        ulIndex++;
+
+        /* The hardware stack frame. r0 carries the task parameter. */
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+        ulIndex++;
+
+        #if ( configENABLE_TRUSTZONE == 1 )
+        {
+            /* A task starts with no secure side context allocated. */
+            xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+            ulIndex++;
+        }
+        #endif /* configENABLE_TRUSTZONE */
+        /* PSP starts 8 words below the stack top to leave room for the
+         * hardware saved frame. */
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+        ulIndex++;
+        /* The CONTROL value and the task flag record whether the task runs
+         * privileged or unprivileged. */
+        if( xRunPrivileged == pdTRUE )
+        {
+            xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        else
+        {
+            xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+        ulIndex++;
+
+        #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+        {
+            /* Ensure that the system call stack is double word aligned. */
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+                                                                                    ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            /* Round the stack limit up so it also falls inside the buffer. */
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+                                                                                           ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+                                                                                         ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            /* This is not NULL only for the duration of a system call. */
+            xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+        }
+        #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+        return &( xMPUSettings->ulContext[ ulIndex ] );
+    }
+
+#else /* configENABLE_MPU */
+
+    /* Construct the initial saved context on a new task's stack so that the
+     * first context restore starts the task executing pxCode with
+     * pvParameters in R0.  The frame layout must match what the context
+     * switch code pops.  Returns the new top of stack. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Simulate the stack frame as it would be created by a context switch
+         * interrupt. */
+        #if ( portPRELOAD_REGISTERS == 0 )
+        {
+            pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack -= 5; /* Skip R12, R3, R2 and R1 - land on the R0 slot. */
+            *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+            pxTopOfStack -= 9; /* Skip R11..R4 - land on the EXC_RETURN slot. */
+            *pxTopOfStack = portINITIAL_EXC_RETURN;
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #else /* portPRELOAD_REGISTERS */
+        {
+            pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+            pxTopOfStack--;
+            *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #endif /* portPRELOAD_REGISTERS */
+
+        return pxTopOfStack;
+    }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Validate the NVIC priority configuration (when asserting with BASEPRI),
+ * set the kernel interrupt priorities, configure the MPU and tick timer,
+ * then start the first task.  Does not return unless startup fails. */
+BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+    {
+        volatile uint32_t ulOriginalPriority;
+        volatile uint32_t ulImplementedPrioBits = 0;
+        volatile uint8_t ucMaxPriorityValue;
+
+        /* Determine the maximum priority from which ISR safe FreeRTOS API
+         * functions can be called. ISR safe functions are those that end in
+         * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+         * ensure interrupt entry is as fast and simple as possible.
+         *
+         * Save the interrupt priority value that is about to be clobbered. */
+        ulOriginalPriority = portNVIC_SHPR2_REG;
+
+        /* Determine the number of priority bits available. First write to all
+         * possible bits. */
+        portNVIC_SHPR2_REG = 0xFF000000;
+
+        /* Read the value back to see how many bits stuck. */
+        ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+        /* Use the same mask on the maximum system call priority. */
+        ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+        /* Check that the maximum system call priority is nonzero after
+         * accounting for the number of priority bits supported by the
+         * hardware. A priority of 0 is invalid because setting the BASEPRI
+         * register to 0 unmasks all interrupts, and interrupts with priority 0
+         * cannot be masked using BASEPRI.
+         * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+        configASSERT( ucMaxSysCallPriority );
+
+        /* Check that the bits not implemented in hardware are zero in
+         * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+        configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+        /* Calculate the maximum acceptable priority group value for the number
+         * of bits read back. */
+
+        while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+        {
+            ulImplementedPrioBits++;
+            ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+        }
+
+        if( ulImplementedPrioBits == 8 )
+        {
+            /* When the hardware implements 8 priority bits, there is no way for
+             * the software to configure PRIGROUP to not have sub-priorities. As
+             * a result, the least significant bit is always used for sub-priority
+             * and there are 128 preemption priorities and 2 sub-priorities.
+             *
+             * This may cause some confusion in some cases - for example, if
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+             * priority interrupts will be masked in Critical Sections as those
+             * are at the same preemption priority. This may appear confusing as
+             * 4 is higher (numerically lower) priority than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+             * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+             * to 4, this confusion does not happen and the behaviour remains the same.
+             *
+             * The following assert ensures that the sub-priority bit in the
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+             * confusion. */
+            configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+            ulMaxPRIGROUPValue = 0;
+        }
+        else
+        {
+            ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+        }
+
+        /* Shift the priority group value back to its position within the AIRCR
+         * register. */
+        ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+        ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+        /* Restore the clobbered interrupt priority register to its original
+         * value. */
+        portNVIC_SHPR2_REG = ulOriginalPriority;
+    }
+    #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+    /* Make PendSV and SysTick the same (lowest) priority as the kernel. */
+    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
+    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
+
+    #if ( configENABLE_MPU == 1 )
+    {
+        /* Setup the Memory Protection Unit (MPU). */
+        prvSetupMPU();
+    }
+    #endif /* configENABLE_MPU */
+
+    /* Start the timer that generates the tick ISR. Interrupts are disabled
+     * here already. */
+    vPortSetupTimerInterrupt();
+
+    /* Initialize the critical nesting count ready for the first task. */
+    ulCriticalNesting = 0;
+
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+    {
+        xSchedulerRunning = pdTRUE;
+    }
+    #endif
+
+    /* Start the first task. */
+    vStartFirstTask();
+
+    /* Should never get here as the tasks will now be executing. Call the task
+     * exit error function to prevent compiler warnings about a static function
+     * not being called in the case that the application writer overrides this
+     * functionality by defining configTASK_RETURN_ADDRESS. Call
+     * vTaskSwitchContext() so link time optimization does not remove the
+     * symbol. */
+    vTaskSwitchContext();
+    prvTaskExitError();
+
+    /* Should not get here. */
+    return 0;
+}
+/*-----------------------------------------------------------*/
+
+void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Not implemented in ports where there is nothing to return to.
+     * Artificially force an assert by testing a value the critical nesting
+     * count can never legitimately hold. */
+    configASSERT( ulCriticalNesting == 1000UL );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Translate the generic MPU region definitions supplied by the kernel
+     * (the task stack plus the user-configurable regions in xRegions) into
+     * ARMv8-M MPU register values (RBAR/RLAR/MAIR0) stored in xMPUSettings. */
+    void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
+                                    const struct xMEMORY_REGION * const xRegions,
+                                    StackType_t * pxBottomOfStack,
+                                    uint32_t ulStackDepth )
+    {
+        uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber;
+        int32_t lIndex = 0;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_sram_start__;
+            extern uint32_t * __privileged_sram_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __privileged_sram_start__[];
+            extern uint32_t __privileged_sram_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+
+        /* Setup MAIR0: Attr0 = normal memory, Attr1 = device memory. */
+        xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+        xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+        /* This function is called automatically when the task is created - in
+         * which case the stack region parameters will be valid. At all other
+         * times the stack parameters will not be valid and it is assumed that
+         * the stack region has already been configured. */
+        if( ulStackDepth > 0 )
+        {
+            ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+            ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1;
+
+            /* If the stack is within the privileged SRAM, do not protect it
+             * using a separate MPU region. This is needed because privileged
+             * SRAM is already protected using an MPU region and ARMv8-M does
+             * not allow overlapping MPU regions. */
+            if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) &&
+                ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) )
+            {
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0;
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0;
+            }
+            else
+            {
+                /* Define the region that allows access to the stack. */
+                ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) |
+                                                             ( portMPU_REGION_NON_SHAREABLE ) |
+                                                             ( portMPU_REGION_READ_WRITE ) |
+                                                             ( portMPU_REGION_EXECUTE_NEVER );
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) |
+                                                             ( portMPU_RLAR_ATTR_INDEX0 ) |
+                                                             ( portMPU_RLAR_REGION_ENABLE );
+            }
+        }
+
+        /* User supplied configurable regions. */
+        for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ )
+        {
+            /* If xRegions is NULL i.e. the task has not specified any MPU
+             * region, the else part ensures that all the configurable MPU
+             * regions are invalidated. */
+            if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) )
+            {
+                /* Translate the generic region definition contained in xRegions
+                 * into the ARMv8 specific MPU settings that are then stored in
+                 * xMPUSettings. */
+                ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                /* Start address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) |
+                                                                          ( portMPU_REGION_NON_SHAREABLE );
+
+                /* RO/RW. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY );
+                }
+                else
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE );
+                }
+
+                /* XN. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER );
+                }
+
+                /* End Address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) |
+                                                                          ( portMPU_RLAR_REGION_ENABLE );
+
+                /* Normal memory/ Device memory. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
+                {
+                    /* Attr1 in MAIR0 is configured as device memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1;
+                }
+                else
+                {
+                    /* Attr0 in MAIR0 is configured as normal memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0;
+                }
+            }
+            else
+            {
+                /* Invalidate the region. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL;
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL;
+            }
+
+            lIndex++;
+        }
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Return pdTRUE if the calling task may perform ulAccessRequested on the
+     * buffer [pvBuffer, pvBuffer + ulBufferLength): privileged tasks always
+     * may; unprivileged tasks only if the whole buffer lies within one of
+     * their enabled MPU regions with matching access permissions. */
+    BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+                                                uint32_t ulBufferLength,
+                                                uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+    {
+        uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+        BaseType_t xAccessGranted = pdFALSE;
+        const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+        if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+        {
+            xAccessGranted = pdTRUE;
+        }
+        else
+        {
+            /* Reject a buffer whose end address would wrap around. */
+            if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+            {
+                ulBufferStartAddress = ( uint32_t ) pvBuffer;
+                ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+                for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+                {
+                    /* Is the MPU region enabled? */
+                    if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+                    {
+                        if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+                            portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+                            portIS_AUTHORIZED( ulAccessRequested,
+                                               prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+                        {
+                            xAccessGranted = pdTRUE;
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+
+        return xAccessGranted;
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Report whether the caller is executing in exception (handler) context. */
+BaseType_t xPortIsInsideInterrupt( void )
+{
+    uint32_t ulIPSR;
+
+    /* The Interrupt Program Status Register (IPSR) holds the exception
+     * number of the currently-executing exception, or zero in Thread mode. */
+    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR )::"memory" );
+
+    return ( ulIPSR == 0 ) ? pdFALSE : pdTRUE;
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+    /* Assert-time sanity check, called from FromISR APIs: verifies that the
+     * currently executing user interrupt runs at or below
+     * configMAX_SYSCALL_INTERRUPT_PRIORITY and that all priority bits are
+     * configured as pre-emption priority. */
+    void vPortValidateInterruptPriority( void )
+    {
+        uint32_t ulCurrentInterrupt;
+        uint8_t ucCurrentPriority;
+
+        /* Obtain the number of the currently executing interrupt. */
+        __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+        /* Is the interrupt number a user defined interrupt? */
+        if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+        {
+            /* Look up the interrupt's priority. */
+            ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+            /* The following assertion will fail if a service routine (ISR) for
+             * an interrupt that has been assigned a priority above
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+             * function. ISR safe FreeRTOS API functions must *only* be called
+             * from interrupts that have been assigned a priority at or below
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Numerically low interrupt priority numbers represent logically high
+             * interrupt priorities, therefore the priority of the interrupt must
+             * be set to a value equal to or numerically *higher* than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Interrupts that use the FreeRTOS API must not be left at their
+             * default priority of zero as that is the highest possible priority,
+             * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+             * and therefore also guaranteed to be invalid.
+             *
+             * FreeRTOS maintains separate thread and ISR API functions to ensure
+             * interrupt entry is as fast and simple as possible.
+             *
+             * The following links provide detailed information:
+             * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+             * https://www.FreeRTOS.org/FAQHelp.html */
+            configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+        }
+
+        /* Priority grouping: The interrupt controller (NVIC) allows the bits
+         * that define each interrupt's priority to be split between bits that
+         * define the interrupt's pre-emption priority bits and bits that define
+         * the interrupt's sub-priority. For simplicity all bits must be defined
+         * to be pre-emption priority bits. The following assertion will fail if
+         * this is not the case (if some bits represent a sub-priority).
+         *
+         * If the application only uses CMSIS libraries for interrupt
+         * configuration then the correct setting can be achieved on all Cortex-M
+         * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+         * scheduler. Note however that some vendor specific peripheral libraries
+         * assume a non-zero priority group setting, in which cases using a value
+         * of zero will result in unpredictable behaviour. */
+        configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+    }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    /* Grant xInternalTaskHandle access to the kernel object identified by
+     * lInternalIndexOfKernelObject by setting its bit in the task's ACL. */
+    void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                         int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Locate the ACL word and the bit within it that track this object. */
+        const uint32_t ulEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject ) / portACL_ENTRY_SIZE_BITS;
+        const uint32_t ulEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject ) % portACL_ENTRY_SIZE_BITS;
+        xMPU_SETTINGS * pxTaskSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+        pxTaskSettings->ulAccessControlList[ ulEntryIndex ] |= ( 1U << ulEntryBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    /* Revoke xInternalTaskHandle's access to the kernel object identified by
+     * lInternalIndexOfKernelObject by clearing its bit in the task's ACL. */
+    void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                          int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Locate the ACL word and the bit within it that track this object. */
+        const uint32_t ulEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject ) / portACL_ENTRY_SIZE_BITS;
+        const uint32_t ulEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject ) % portACL_ENTRY_SIZE_BITS;
+        xMPU_SETTINGS * pxTaskSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+        pxTaskSettings->ulAccessControlList[ ulEntryIndex ] &= ~( 1U << ulEntryBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+        /* Return pdTRUE if the calling task may access the kernel object
+         * identified by lInternalIndexOfKernelObject: always before the
+         * scheduler starts and for privileged tasks, otherwise only if the
+         * object's bit is set in the task's access control list. */
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+            BaseType_t xAccessGranted = pdFALSE;
+            const xMPU_SETTINGS * xTaskMpuSettings;
+
+            if( xSchedulerRunning == pdFALSE )
+            {
+                /* Grant access to all the kernel objects before the scheduler
+                 * is started. It is necessary because there is no task running
+                 * yet and therefore, we cannot use the permissions of any
+                 * task. */
+                xAccessGranted = pdTRUE;
+            }
+            else
+            {
+                xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+                ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+                ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+                if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+                {
+                    xAccessGranted = pdTRUE;
+                }
+                else
+                {
+                    if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+                    {
+                        xAccessGranted = pdTRUE;
+                    }
+                }
+            }
+
+            return xAccessGranted;
+        }
+
+    #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+        /* ACL feature disabled: every task may access every kernel object. */
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            ( void ) lInternalIndexOfKernelObject;
+
+            /* If Access Control List feature is not used, all the tasks have
+             * access to all the kernel objects. */
+            return pdTRUE;
+        }
+
+    #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM55/non_secure/portasm.h b/Source/portable/IAR/ARM_CM55/non_secure/portasm.h
new file mode 100644
index 0000000..f64ceb5
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/non_secure/portasm.h
@@ -0,0 +1,114 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __PORT_ASM_H__
+#define __PORT_ASM_H__
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+
+/* MPU wrappers includes. */
+#include "mpu_wrappers.h"
+
+/**
+ * @brief Restore the context of the first task so that the first task starts
+ * executing.
+ */
+void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Raises the privilege level by clearing the bit 0 of the CONTROL
+ * register.
+ *
+ * @note This is a privileged function and should only be called from the
+ * kernel code.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vResetPrivilege( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Starts the first task.
+ */
+void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Disables interrupts.
+ */
+uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enables interrupts.
+ */
+void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief PendSV Exception handler.
+ */
+void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SVC Handler.
+ */
+void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Allocate a Secure context for the calling task.
+ *
+ * @param[in] ulSecureStackSize The size of the stack to be allocated on the
+ * secure side for the calling task.
+ */
+void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) );
+
+/**
+ * @brief Free the task's secure context.
+ *
+ * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task.
+ */
+void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* __PORT_ASM_H__ */
diff --git a/Source/portable/IAR/ARM_CM55/non_secure/portasm.s b/Source/portable/IAR/ARM_CM55/non_secure/portasm.s
new file mode 100644
index 0000000..5309103
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/non_secure/portasm.s
@@ -0,0 +1,496 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+/* Including FreeRTOSConfig.h here will cause build errors if the header file
+contains code not understood by the assembler - for example the 'extern' keyword.
+To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so
+the code is included in C files but excluded by the preprocessor in assembly
+files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
+#include "FreeRTOSConfig.h"
+
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+ EXTERN pxCurrentTCB
+ EXTERN xSecureContext
+ EXTERN vTaskSwitchContext
+ EXTERN vPortSVCHandler_C
+ EXTERN SecureContext_SaveContext
+ EXTERN SecureContext_LoadContext
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallExit
+#endif
+
+ PUBLIC xIsPrivileged
+ PUBLIC vResetPrivilege
+ PUBLIC vPortAllocateSecureContext
+ PUBLIC vRestoreContextOfFirstTask
+ PUBLIC vRaisePrivilege
+ PUBLIC vStartFirstTask
+ PUBLIC ulSetInterruptMask
+ PUBLIC vClearInterruptMask
+ PUBLIC PendSV_Handler
+ PUBLIC SVC_Handler
+ PUBLIC vPortFreeSecureContext
+/*-----------------------------------------------------------*/
+
+/*---------------- Unprivileged Functions -------------------*/
+
+/*-----------------------------------------------------------*/
+
+ SECTION .text:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+xIsPrivileged:
+ mrs r0, control /* r0 = CONTROL. */
+ tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+ ite ne
+ movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+vResetPrivilege:
+ mrs r0, control /* r0 = CONTROL. */
+ orr r0, r0, #1 /* r0 = r0 | 1. */
+ msr control, r0 /* CONTROL = r0. */
+ bx lr /* Return to the caller. */
+/*-----------------------------------------------------------*/
+
+vPortAllocateSecureContext:
+ svc 100 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 100. */
+ bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+/*----------------- Privileged Functions --------------------*/
+
+/*-----------------------------------------------------------*/
+
+ SECTION privileged_functions:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+
+ restore_general_regs_first_task:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
+vRestoreContextOfFirstTask:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r3, [r2] /* Read pxCurrentTCB. */
+ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+
+ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ ldr r4, =xSecureContext
+ str r1, [r4] /* Set xSecureContext to this task's value for the same. */
+ msr psplim, r2 /* Set this task's PSPLIM value. */
+ movs r1, #2 /* r1 = 2. */
+ msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
+ adds r0, #32 /* Discard everything up to r0. */
+ msr psp, r0 /* This is now the new top of stack to use in the task. */
+ isb
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx r3 /* Finally, branch to EXC_RETURN. */
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+vRaisePrivilege:
+ mrs r0, control /* Read the CONTROL register. */
+ bic r0, r0, #1 /* Clear the bit 0. */
+ msr control, r0 /* Write back the new CONTROL value. */
+ bx lr /* Return to the caller. */
+/*-----------------------------------------------------------*/
+
+vStartFirstTask:
+ ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
+ ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
+ ldr r0, [r0] /* The first entry in vector table is stack pointer. */
+ msr msp, r0 /* Set the MSP back to the start of the stack. */
+ cpsie i /* Globally enable interrupts. */
+ cpsie f
+ dsb
+ isb
+ svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
+/*-----------------------------------------------------------*/
+
+ulSetInterruptMask:
+ mrs r0, basepri /* r0 = basepri. Return original basepri value. */
+ mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r1 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+vClearInterruptMask:
+ msr basepri, r0 /* basepri = ulMask. */
+ dsb
+ isb
+ bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */
+
+ cbz r0, save_ns_context /* No secure context to save. */
+ save_s_context:
+ push {r0-r2, lr}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r2, lr}
+
+ save_ns_context:
+ mov r3, lr /* r3 = LR (EXC_RETURN). */
+ lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ save_general_regs:
+ mrs r3, psp
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r2!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */
+ sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r2!, {r4-r11} /* Store r4-r11. */
+ ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r2!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psp /* r3 = PSP. */
+ mrs r4, psplim /* r4 = PSPLIM. */
+ mrs r5, control /* r5 = CONTROL. */
+ stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+ cbz r0, restore_ns_context /* No secure context to restore. */
+
+ restore_s_context:
+ push {r1-r3, lr}
+ bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r1-r3, lr}
+
+ restore_ns_context:
+ mov r0, lr /* r0 = LR (EXC_RETURN). */
+ lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ restore_general_regs:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
+PendSV_Handler:
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ mrs r2, psp /* Read PSP in r2. */
+
+ cbz r0, save_ns_context /* No secure context to save. */
+ push {r0-r2, r14}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r3} /* LR is now in r3. */
+ mov lr, r3 /* LR = r3. */
+ lsls r1, r3, #25 /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ str r2, [r1] /* Save the new top of stack in TCB. */
+ mrs r1, psplim /* r1 = PSPLIM. */
+ mov r3, lr /* r3 = LR/EXC_RETURN. */
+ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
+ b select_next_task
+
+ save_ns_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ it eq
+ vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ str r2, [r1] /* Save the new top of stack in TCB. */
+ adds r2, r2, #12 /* r2 = r2 + 12. */
+ stm r2, {r4-r11} /* Store the registers that are not saved automatically. */
+ mrs r1, psplim /* r1 = PSPLIM. */
+ mov r3, lr /* r3 = LR/EXC_RETURN. */
+ subs r2, r2, #12 /* r2 = r2 - 12. */
+ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
+
+ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ msr psplim, r1 /* Restore the PSPLIM register value for the task. */
+ mov lr, r4 /* LR = r4. */
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r3] /* Restore the task's xSecureContext. */
+ cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ push {r2, r4}
+ bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r2, r4}
+ mov lr, r4 /* LR = r4. */
+ lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ msr psp, r2 /* Remember the new top of stack for the task. */
+ bx lr
+
+ restore_ns_context:
+ ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ it eq
+ vldmiaeq r2!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ msr psp, r2 /* Remember the new top of stack for the task. */
+ bx lr
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #NUM_SYSTEM_CALLS
+ blt syscall_enter
+ cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+ b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+vPortFreeSecureContext:
+ /* r0 = uint32_t *pulTCB. */
+ ldr r2, [r0] /* The first item in the TCB is the top of the stack. */
+ ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */
+ cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */
+ it ne
+ svcne 101 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 101. */
+ bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+ END
diff --git a/Source/portable/IAR/ARM_CM55/non_secure/portmacro.h b/Source/portable/IAR/ARM_CM55/non_secure/portmacro.h
new file mode 100644
index 0000000..15cb65e
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/non_secure/portmacro.h
@@ -0,0 +1,85 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_MVE
+ #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE.
+#endif /* configENABLE_MVE */
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portARCH_NAME "Cortex-M55"
+#define portHAS_BASEPRI 1
+#define portDONT_DISCARD __root
+/*-----------------------------------------------------------*/
+
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portDISABLE_INTERRUPTS() ulSetInterruptMask()
+#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 )
+/*-----------------------------------------------------------*/
+
+/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in
+ * the source code because to do so would cause other compilers to generate
+ * warnings. */
+#pragma diag_suppress=Be006
+#pragma diag_suppress=Pa082
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h b/Source/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h
new file mode 100644
index 0000000..6f666da
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h
@@ -0,0 +1,449 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
+
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
+
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
+
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Type definitions.
+ */
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
+
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
+
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+
+/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
+ * not need to be guarded with a critical section. */
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Extern declarations.
+ */
+extern BaseType_t xPortIsInsideInterrupt( void );
+
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
+
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief MPU specific constants.
+ */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
+
+/* MPU settings that can be overriden in FreeRTOSConfig.h. */
+#ifndef configTOTAL_MPU_REGIONS
+ /* Define to 8 for backward compatibility. */
+ #define configTOTAL_MPU_REGIONS ( 8UL )
+#endif
+
+/* MPU regions. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+
+/* Device memory attributes used in MPU_MAIR registers.
+ *
+ * 8-bit values encoded as follows:
+ * Bit[7:4] - 0000 - Device Memory
+ * Bit[3:2] - 00 --> Device-nGnRnE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
+ * Bit[1:0] - 00, Reserved.
+ */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+
+/* Normal memory attributes used in MPU_MAIR registers. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+
+/* Attributes used in MPU_RBAR registers. */
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ /* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
+ * @brief SVC numbers.
+ */
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Scheduler utilities.
+ */
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Tickless idle/low power functionality.
+ */
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Task function macros as described on the FreeRTOS.org WEB site.
+ */
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Allocate a secure context for the task.
+ *
+ * Tasks are not created with a secure context. Any task that is going to call
+ * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
+ * secure context before it calls any secure function.
+ *
+ * @param[in] ulSecureStackSize The size of the secure stack to be allocated.
+ */
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+
+/**
+ * @brief Called when a task is deleted to delete the task's secure context,
+ * if it has one.
+ *
+ * @param[in] pxTCB The TCB of the task being deleted.
+ */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+ #define portIS_PRIVILEGED() xIsPrivileged()
+
+/**
+ * @brief Raise an SVC request to raise privilege.
+ *
+ * The SVC handler checks that the SVC was raised from a system call and only
+ * then it raises the privilege. If this is called from any other place,
+ * the privilege is not raised.
+ */
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Barriers.
+ */
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/IAR/ARM_CM55/secure/secure_context.c b/Source/portable/IAR/ARM_CM55/secure/secure_context.c
new file mode 100644
index 0000000..e37dd96
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/secure/secure_context.c
@@ -0,0 +1,351 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Secure context includes. */
+#include "secure_context.h"
+
+/* Secure heap includes. */
+#include "secure_heap.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief CONTROL value for privileged tasks.
+ *
+ * Bit[0] - 0 --> Thread mode is privileged.
+ * Bit[1] - 1 --> Thread mode uses PSP.
+ */
+#define securecontextCONTROL_VALUE_PRIVILEGED 0x02
+
+/**
+ * @brief CONTROL value for un-privileged tasks.
+ *
+ * Bit[0] - 1 --> Thread mode is un-privileged.
+ * Bit[1] - 1 --> Thread mode uses PSP.
+ */
+#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03
+
+/**
+ * @brief Size of stack seal values in bytes.
+ */
+#define securecontextSTACK_SEAL_SIZE 8
+
+/**
+ * @brief Stack seal value as recommended by ARM.
+ */
+#define securecontextSTACK_SEAL_VALUE 0xFEF5EDA5
+
+/**
+ * @brief Maximum number of secure contexts.
+ */
+#ifndef secureconfigMAX_SECURE_CONTEXTS
+ #define secureconfigMAX_SECURE_CONTEXTS 8UL
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Pre-allocated array of secure contexts.
+ */
+SecureContext_t xSecureContexts[ secureconfigMAX_SECURE_CONTEXTS ];
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Get a free secure context for a task from the secure context pool (xSecureContexts).
+ *
+ * This function ensures that only one secure context is allocated for a task.
+ *
+ * @param[in] pvTaskHandle The task handle for which the secure context is allocated.
+ *
+ * @return Index of a free secure context in the xSecureContexts array.
+ */
+static uint32_t ulGetSecureContext( void * pvTaskHandle );
+
+/**
+ * @brief Return the secure context to the secure context pool (xSecureContexts).
+ *
+ * @param[in] ulSecureContextIndex Index of the context in the xSecureContexts array.
+ */
+static void vReturnSecureContext( uint32_t ulSecureContextIndex );
+
+/* These are implemented in assembly. */
+extern void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext );
+extern void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext );
+/*-----------------------------------------------------------*/
+
+static uint32_t ulGetSecureContext( void * pvTaskHandle )
+{
+ /* Start with invalid index. */
+ uint32_t i, ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS;
+
+ for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ )
+ {
+ if( ( xSecureContexts[ i ].pucCurrentStackPointer == NULL ) &&
+ ( xSecureContexts[ i ].pucStackLimit == NULL ) &&
+ ( xSecureContexts[ i ].pucStackStart == NULL ) &&
+ ( xSecureContexts[ i ].pvTaskHandle == NULL ) &&
+ ( ulSecureContextIndex == secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = i;
+ }
+ else if( xSecureContexts[ i ].pvTaskHandle == pvTaskHandle )
+ {
+ /* A task can only have one secure context. Do not allocate a second
+ * context for the same task. */
+ ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS;
+ break;
+ }
+ }
+
+ return ulSecureContextIndex;
+}
+/*-----------------------------------------------------------*/
+
+static void vReturnSecureContext( uint32_t ulSecureContextIndex )
+{
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = NULL;
+ xSecureContexts[ ulSecureContextIndex ].pucStackLimit = NULL;
+ xSecureContexts[ ulSecureContextIndex ].pucStackStart = NULL;
+ xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = NULL;
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_Init( void )
+{
+ uint32_t ulIPSR, i;
+ static uint32_t ulSecureContextsInitialized = 0;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ( ulIPSR != 0 ) && ( ulSecureContextsInitialized == 0 ) )
+ {
+ /* Ensure to initialize secure contexts only once. */
+ ulSecureContextsInitialized = 1;
+
+ /* No stack for thread mode until a task's context is loaded. */
+ secureportSET_PSPLIM( securecontextNO_STACK );
+ secureportSET_PSP( securecontextNO_STACK );
+
+ /* Initialize all secure contexts. */
+ for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ )
+ {
+ xSecureContexts[ i ].pucCurrentStackPointer = NULL;
+ xSecureContexts[ i ].pucStackLimit = NULL;
+ xSecureContexts[ i ].pucStackStart = NULL;
+ xSecureContexts[ i ].pvTaskHandle = NULL;
+ }
+
+ #if ( configENABLE_MPU == 1 )
+ {
+ /* Configure thread mode to use PSP and to be unprivileged. */
+ secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED );
+ }
+ #else /* configENABLE_MPU */
+ {
+ /* Configure thread mode to use PSP and to be privileged. */
+ secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED );
+ }
+ #endif /* configENABLE_MPU */
+ }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+ secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ uint32_t ulIsTaskPrivileged,
+ void * pvTaskHandle )
+#else /* configENABLE_MPU */
+ secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ void * pvTaskHandle )
+#endif /* configENABLE_MPU */
+{
+ uint8_t * pucStackMemory = NULL;
+ uint8_t * pucStackLimit;
+ uint32_t ulIPSR, ulSecureContextIndex;
+ SecureContextHandle_t xSecureContextHandle = securecontextINVALID_CONTEXT_ID;
+
+ #if ( configENABLE_MPU == 1 )
+ uint32_t * pulCurrentStackPointer = NULL;
+ #endif /* configENABLE_MPU */
+
+ /* Read the Interrupt Program Status Register (IPSR) and Process Stack Limit
+ * Register (PSPLIM) value. */
+ secureportREAD_IPSR( ulIPSR );
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode.
+ * Also do nothing if a secure context is already loaded. PSPLIM is set to
+ * securecontextNO_STACK when no secure context is loaded. */
+ if( ( ulIPSR != 0 ) && ( pucStackLimit == securecontextNO_STACK ) )
+ {
+ /* Obtain a free secure context. */
+ ulSecureContextIndex = ulGetSecureContext( pvTaskHandle );
+
+ /* Were we able to get a free context? */
+ if( ulSecureContextIndex < secureconfigMAX_SECURE_CONTEXTS )
+ {
+ /* Allocate the stack space. */
+ pucStackMemory = pvPortMalloc( ulSecureStackSize + securecontextSTACK_SEAL_SIZE );
+
+ if( pucStackMemory != NULL )
+ {
+ /* Since stack grows down, the starting point will be the last
+ * location. Note that this location is next to the last
+ * allocated byte for stack (excluding the space for seal values)
+ * because the hardware decrements the stack pointer before
+ * writing i.e. if stack pointer is 0x2, a push operation will
+ * decrement the stack pointer to 0x1 and then write at 0x1. */
+ xSecureContexts[ ulSecureContextIndex ].pucStackStart = pucStackMemory + ulSecureStackSize;
+
+ /* Seal the created secure process stack. */
+ *( uint32_t * )( pucStackMemory + ulSecureStackSize ) = securecontextSTACK_SEAL_VALUE;
+ *( uint32_t * )( pucStackMemory + ulSecureStackSize + 4 ) = securecontextSTACK_SEAL_VALUE;
+
+ /* The stack cannot go beyond this location. This value is
+ * programmed in the PSPLIM register on context switch. */
+ xSecureContexts[ ulSecureContextIndex ].pucStackLimit = pucStackMemory;
+
+ xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = pvTaskHandle;
+
+ #if ( configENABLE_MPU == 1 )
+ {
+ /* Store the correct CONTROL value for the task on the stack.
+ * This value is programmed in the CONTROL register on
+ * context switch. */
+ pulCurrentStackPointer = ( uint32_t * ) xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+ pulCurrentStackPointer--;
+
+ if( ulIsTaskPrivileged )
+ {
+ *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED;
+ }
+ else
+ {
+ *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED;
+ }
+
+ /* Store the current stack pointer. This value is programmed in
+ * the PSP register on context switch. */
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer;
+ }
+ #else /* configENABLE_MPU */
+ {
+ /* Current SP is set to the start of the stack. This
+ * value is programmed in the PSP register on context switch. */
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+ }
+ #endif /* configENABLE_MPU */
+
+ /* Ensure to never return 0 as a valid context handle. */
+ xSecureContextHandle = ulSecureContextIndex + 1UL;
+ }
+ }
+ }
+
+ return xSecureContextHandle;
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint32_t ulIPSR, ulSecureContextIndex;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ulIPSR != 0 )
+ {
+ /* Only free if a valid context handle is passed. */
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ /* Ensure that the secure context being deleted is associated with
+ * the task. */
+ if( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle )
+ {
+ /* Free the stack space. */
+ vPortFree( xSecureContexts[ ulSecureContextIndex ].pucStackLimit );
+
+ /* Return the secure context back to the free secure contexts pool. */
+ vReturnSecureContext( ulSecureContextIndex );
+ }
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint8_t * pucStackLimit;
+ uint32_t ulSecureContextIndex;
+
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Ensure that no secure context is loaded and the task is loading its
+ * own context. */
+ if( ( pucStackLimit == securecontextNO_STACK ) &&
+ ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) )
+ {
+ SecureContext_LoadContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) );
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint8_t * pucStackLimit;
+ uint32_t ulSecureContextIndex;
+
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Ensure that the task's context is loaded and the task is saving its own
+ * context. */
+ if( ( xSecureContexts[ ulSecureContextIndex ].pucStackLimit == pucStackLimit ) &&
+ ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) )
+ {
+ SecureContext_SaveContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) );
+ }
+ }
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM55/secure/secure_context.h b/Source/portable/IAR/ARM_CM55/secure/secure_context.h
new file mode 100644
index 0000000..2220ea6
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/secure/secure_context.h
@@ -0,0 +1,135 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_CONTEXT_H__
+#define __SECURE_CONTEXT_H__
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* FreeRTOS includes. */
+#include "FreeRTOSConfig.h"
+
+/**
+ * @brief PSP value when no secure context is loaded.
+ */
+#define securecontextNO_STACK 0x0
+
+/**
+ * @brief Invalid context ID.
+ */
+#define securecontextINVALID_CONTEXT_ID 0UL
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Structure to represent a secure context.
+ *
+ * @note Since stack grows down, pucStackStart is the highest address while
+ * pucStackLimit is the first address of the allocated memory.
+ */
+typedef struct SecureContext
+{
+ uint8_t * pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */
+ uint8_t * pucStackLimit; /**< Last location of the stack memory (PSPLIM). */
+ uint8_t * pucStackStart; /**< First location of the stack memory. */
+ void * pvTaskHandle; /**< Task handle of the task this context is associated with. */
+} SecureContext_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Opaque handle for a secure context.
+ */
+typedef uint32_t SecureContextHandle_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Initializes the secure context management system.
+ *
+ * PSP is set to NULL and therefore a task must allocate and load a context
+ * before calling any secure side function in the thread mode.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureContext_Init( void );
+
+/**
+ * @brief Allocates a context on the secure side.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] ulSecureStackSize Size of the stack to allocate on secure side.
+ * @param[in] ulIsTaskPrivileged 1 if the calling task is privileged, 0 otherwise.
+ *
+ * @return Opaque context handle if context is successfully allocated,
+ * securecontextINVALID_CONTEXT_ID (0) otherwise.
+ */
+#if ( configENABLE_MPU == 1 )
+ SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ uint32_t ulIsTaskPrivileged,
+ void * pvTaskHandle );
+#else /* configENABLE_MPU */
+ SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ void * pvTaskHandle );
+#endif /* configENABLE_MPU */
+
+/**
+ * @brief Frees the given context.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] xSecureContextHandle Context handle corresponding to the
+ * context to be freed.
+ */
+void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle );
+
+/**
+ * @brief Loads the given context.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] xSecureContextHandle Context handle corresponding to the context
+ * to be loaded.
+ */
+void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle );
+
+/**
+ * @brief Saves the given context.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] xSecureContextHandle Context handle corresponding to the context
+ * to be saved.
+ */
+void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle );
+
+#endif /* __SECURE_CONTEXT_H__ */
diff --git a/Source/portable/IAR/ARM_CM55/secure/secure_context_port_asm.s b/Source/portable/IAR/ARM_CM55/secure/secure_context_port_asm.s
new file mode 100644
index 0000000..0da3e0f
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/secure/secure_context_port_asm.s
@@ -0,0 +1,86 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+ SECTION .text:CODE:NOROOT(2)
+ THUMB
+
+/* Including FreeRTOSConfig.h here will cause build errors if the header file
+contains code not understood by the assembler - for example the 'extern' keyword.
+To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so
+the code is included in C files but excluded by the preprocessor in assembly
+files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
+#include "FreeRTOSConfig.h"
+
+ PUBLIC SecureContext_LoadContextAsm
+ PUBLIC SecureContext_SaveContextAsm
+/*-----------------------------------------------------------*/
+
+SecureContext_LoadContextAsm:
+ /* pxSecureContext value is in r0. */
+ mrs r1, ipsr /* r1 = IPSR. */
+ cbz r1, load_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */
+ ldmia r0!, {r1, r2} /* r1 = pxSecureContext->pucCurrentStackPointer, r2 = pxSecureContext->pucStackLimit. */
+
+#if ( configENABLE_MPU == 1 )
+ ldmia r1!, {r3} /* Read CONTROL register value from task's stack. r3 = CONTROL. */
+ msr control, r3 /* CONTROL = r3. */
+#endif /* configENABLE_MPU */
+
+ msr psplim, r2 /* PSPLIM = r2. */
+ msr psp, r1 /* PSP = r1. */
+
+ load_ctx_therad_mode:
+ bx lr
+/*-----------------------------------------------------------*/
+
+SecureContext_SaveContextAsm:
+ /* pxSecureContext value is in r0. */
+ mrs r1, ipsr /* r1 = IPSR. */
+ cbz r1, save_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */
+ mrs r1, psp /* r1 = PSP. */
+
+#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ vstmdb r1!, {s0} /* Trigger the deferred stacking of FPU registers. */
+ vldmia r1!, {s0} /* Nullify the effect of the previous statement. */
+#endif /* configENABLE_FPU || configENABLE_MVE */
+
+#if ( configENABLE_MPU == 1 )
+ mrs r2, control /* r2 = CONTROL. */
+ stmdb r1!, {r2} /* Store CONTROL value on the stack. */
+#endif /* configENABLE_MPU */
+
+ str r1, [r0] /* Save the top of stack in context. pxSecureContext->pucCurrentStackPointer = r1. */
+ movs r1, #0 /* r1 = securecontextNO_STACK. */
+ msr psplim, r1 /* PSPLIM = securecontextNO_STACK. */
+ msr psp, r1 /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */
+
+ save_ctx_therad_mode:
+ bx lr
+/*-----------------------------------------------------------*/
+
+ END
diff --git a/Source/portable/IAR/ARM_CM55/secure/secure_heap.c b/Source/portable/IAR/ARM_CM55/secure/secure_heap.c
new file mode 100644
index 0000000..19f7c23
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/secure/secure_heap.c
@@ -0,0 +1,454 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Secure context heap includes. */
+#include "secure_heap.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief Total heap size.
+ */
+#ifndef secureconfigTOTAL_HEAP_SIZE
+ #define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) )
+#endif
+
+/* No test marker by default. */
+#ifndef mtCOVERAGE_TEST_MARKER
+ #define mtCOVERAGE_TEST_MARKER()
+#endif
+
+/* No tracing by default. */
+#ifndef traceMALLOC
+ #define traceMALLOC( pvReturn, xWantedSize )
+#endif
+
+/* No tracing by default. */
+#ifndef traceFREE
+ #define traceFREE( pv, xBlockSize )
+#endif
+
+/* Block sizes must not get too small. */
+#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) )
+
+/* Assumes 8bit bytes! */
+#define secureheapBITS_PER_BYTE ( ( size_t ) 8 )
+/*-----------------------------------------------------------*/
+
+/* Allocate the memory for the heap. */
+#if ( configAPPLICATION_ALLOCATED_HEAP == 1 )
+
+/* The application writer has already defined the array used for the RTOS
+* heap - probably so it can be placed in a special segment or address. */
+ extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+#else /* configAPPLICATION_ALLOCATED_HEAP */
+ static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+#endif /* configAPPLICATION_ALLOCATED_HEAP */
+
+/**
+ * @brief The linked list structure.
+ *
+ * This is used to link free blocks in order of their memory address.
+ */
+typedef struct A_BLOCK_LINK
+{
+    struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list; NULL while the block is allocated to the application. */
+    size_t xBlockSize;                     /**< The size of the block in bytes, including this header.  The top bit (xBlockAllocatedBit) is set while the block is owned by the application. */
+} BlockLink_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Called automatically to setup the required heap structures the first
+ * time pvPortMalloc() is called.
+ */
+static void prvHeapInit( void );
+
+/**
+ * @brief Inserts a block of memory that is being freed into the correct
+ * position in the list of free memory blocks.
+ *
+ * The block being freed will be merged with the block in front it and/or the
+ * block behind it if the memory blocks are adjacent to each other.
+ *
+ * @param[in] pxBlockToInsert The block being freed.
+ */
+static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert );
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The size of the structure placed at the beginning of each allocated
+ * memory block must by correctly byte aligned.
+ */
+static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( secureportBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+
+/**
+ * @brief Create a couple of list links to mark the start and end of the list.
+ */
+static BlockLink_t xStart;
+static BlockLink_t * pxEnd = NULL;
+
+/**
+ * @brief Keeps track of the number of free bytes remaining, but says nothing
+ * about fragmentation.
+ */
+static size_t xFreeBytesRemaining = 0U;
+static size_t xMinimumEverFreeBytesRemaining = 0U;
+
+/**
+ * @brief Gets set to the top bit of an size_t type.
+ *
+ * When this bit in the xBlockSize member of an BlockLink_t structure is set
+ * then the block belongs to the application. When the bit is free the block is
+ * still part of the free heap space.
+ */
+static size_t xBlockAllocatedBit = 0;
+/*-----------------------------------------------------------*/
+
+/* One-time heap setup.  Called from pvPortMalloc() on the first allocation
+ * (detected via pxEnd == NULL).  Aligns the start of ucHeap, places the pxEnd
+ * end-marker at the aligned top of the heap, and links a single free block
+ * covering all remaining space into the xStart list. */
+static void prvHeapInit( void )
+{
+    BlockLink_t * pxFirstFreeBlock;
+    uint8_t * pucAlignedHeap;
+    size_t uxAddress;
+    size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE;
+
+    /* Ensure the heap starts on a correctly aligned boundary. */
+    uxAddress = ( size_t ) ucHeap;
+
+    if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 )
+    {
+        uxAddress += ( secureportBYTE_ALIGNMENT - 1 );
+        uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+        /* Bytes skipped by the alignment round-up are unusable heap space. */
+        xTotalHeapSize -= uxAddress - ( size_t ) ucHeap;
+    }
+
+    pucAlignedHeap = ( uint8_t * ) uxAddress;
+
+    /* xStart is used to hold a pointer to the first item in the list of free
+     * blocks. The void cast is used to prevent compiler warnings. */
+    xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
+    xStart.xBlockSize = ( size_t ) 0;
+
+    /* pxEnd is used to mark the end of the list of free blocks and is inserted
+     * at the end of the heap space. */
+    uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize;
+    uxAddress -= xHeapStructSize;
+    uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+    pxEnd = ( void * ) uxAddress;
+    pxEnd->xBlockSize = 0;
+    pxEnd->pxNextFreeBlock = NULL;
+
+    /* To start with there is a single free block that is sized to take up the
+     * entire heap space, minus the space taken by pxEnd. */
+    pxFirstFreeBlock = ( void * ) pucAlignedHeap;
+    pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock;
+    pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
+
+    /* Only one block exists - and it covers the entire usable heap space. */
+    xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+    xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+
+    /* Work out the position of the top bit in a size_t variable. */
+    xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 );
+}
+/*-----------------------------------------------------------*/
+
+/* Inserts pxBlockToInsert into the address-ordered free list, coalescing it
+ * with the adjacent previous and/or next free block when they are contiguous
+ * in memory.  The caller must have already cleared xBlockAllocatedBit. */
+static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert )
+{
+    BlockLink_t * pxIterator;
+    uint8_t * puc;
+
+    /* Iterate through the list until a block is found that has a higher address
+     * than the block being inserted. */
+    for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
+    {
+        /* Nothing to do here, just iterate to the right position. */
+    }
+
+    /* Do the block being inserted, and the block it is being inserted after
+     * make a contiguous block of memory? */
+    puc = ( uint8_t * ) pxIterator;
+
+    if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
+    {
+        /* Merge into the previous block by growing it; the merged block is
+         * now the one being inserted. */
+        pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
+        pxBlockToInsert = pxIterator;
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    /* Do the block being inserted, and the block it is being inserted before
+     * make a contiguous block of memory? */
+    puc = ( uint8_t * ) pxBlockToInsert;
+
+    if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
+    {
+        if( pxIterator->pxNextFreeBlock != pxEnd )
+        {
+            /* Form one big block from the two blocks. */
+            pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
+            pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
+        }
+        else
+        {
+            /* Never merge with the pxEnd marker - it must stay in place. */
+            pxBlockToInsert->pxNextFreeBlock = pxEnd;
+        }
+    }
+    else
+    {
+        pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
+    }
+
+    /* If the block being inserted plugged a gap, so was merged with the block
+     * before and the block after, then its pxNextFreeBlock pointer will have
+     * already been set, and should not be set here as that would make it point
+     * to itself. */
+    if( pxIterator != pxBlockToInsert )
+    {
+        pxIterator->pxNextFreeBlock = pxBlockToInsert;
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+}
+/*-----------------------------------------------------------*/
+
+/* Allocates xWantedSize bytes from the secure-side heap using a first-fit
+ * scan of the address-ordered free list (heap_4 style).  Returns a pointer to
+ * at least xWantedSize usable bytes, or NULL on failure.  The returned pointer
+ * is secureportBYTE_ALIGNMENT-aligned.
+ *
+ * NOTE(review): unlike vPortFree(), this path does not call
+ * secureportDISABLE_NON_SECURE_INTERRUPTS() - presumably callers serialize
+ * allocation; confirm against the secure context allocator. */
+void * pvPortMalloc( size_t xWantedSize )
+{
+    BlockLink_t * pxBlock;
+    BlockLink_t * pxPreviousBlock;
+    BlockLink_t * pxNewBlockLink;
+    void * pvReturn = NULL;
+
+    /* If this is the first call to malloc then the heap will require
+     * initialisation to setup the list of free blocks. */
+    if( pxEnd == NULL )
+    {
+        prvHeapInit();
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    /* Check the requested block size is not so large that the top bit is set.
+     * The top bit of the block size member of the BlockLink_t structure is used
+     * to determine who owns the block - the application or the kernel, so it
+     * must be free. */
+    if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
+    {
+        /* The wanted size is increased so it can contain a BlockLink_t
+         * structure in addition to the requested amount of bytes. */
+        if( xWantedSize > 0 )
+        {
+            /* NOTE(review): the additions below are not checked for size_t
+             * wrap-around; a near-SIZE_MAX request could overflow.  Later
+             * kernel versions add an explicit overflow check here. */
+            xWantedSize += xHeapStructSize;
+
+            /* Ensure that blocks are always aligned to the required number of
+             * bytes. */
+            if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 )
+            {
+                /* Byte alignment required. */
+                xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) );
+                secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 );
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
+        {
+            /* Traverse the list from the start (lowest address) block until
+             * one of adequate size is found. */
+            pxPreviousBlock = &xStart;
+            pxBlock = xStart.pxNextFreeBlock;
+
+            while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
+            {
+                pxPreviousBlock = pxBlock;
+                pxBlock = pxBlock->pxNextFreeBlock;
+            }
+
+            /* If the end marker was reached then a block of adequate size was
+             * not found. */
+            if( pxBlock != pxEnd )
+            {
+                /* Return the memory space pointed to - jumping over the
+                 * BlockLink_t structure at its start. */
+                pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize );
+
+                /* This block is being returned for use so must be taken out
+                 * of the list of free blocks. */
+                pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
+
+                /* If the block is larger than required it can be split into
+                 * two. */
+                if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE )
+                {
+                    /* This block is to be split into two. Create a new
+                     * block following the number of bytes requested. The void
+                     * cast is used to prevent byte alignment warnings from the
+                     * compiler. */
+                    pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
+                    secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 );
+
+                    /* Calculate the sizes of two blocks split from the single
+                     * block. */
+                    pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
+                    pxBlock->xBlockSize = xWantedSize;
+
+                    /* Insert the new block into the list of free blocks. */
+                    prvInsertBlockIntoFreeList( pxNewBlockLink );
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+
+                xFreeBytesRemaining -= pxBlock->xBlockSize;
+
+                if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
+                {
+                    /* Record the new low-water mark. */
+                    xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+
+                /* The block is being returned - it is allocated and owned by
+                 * the application and has no "next" block. */
+                pxBlock->xBlockSize |= xBlockAllocatedBit;
+                pxBlock->pxNextFreeBlock = NULL;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    traceMALLOC( pvReturn, xWantedSize );
+
+    #if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 )
+    {
+        if( pvReturn == NULL )
+        {
+            extern void vApplicationMallocFailedHook( void );
+            vApplicationMallocFailedHook();
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+    #endif /* if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) */
+
+    secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 );
+    return pvReturn;
+}
+/*-----------------------------------------------------------*/
+
+/* Returns a block previously obtained from pvPortMalloc() to the secure heap.
+ * A NULL argument is a no-op.  The free-list insertion is performed with
+ * non-secure interrupts masked so a non-secure context switch cannot preempt
+ * the list manipulation. */
+void vPortFree( void * pv )
+{
+    uint8_t * puc = ( uint8_t * ) pv;
+    BlockLink_t * pxLink;
+
+    if( pv != NULL )
+    {
+        /* The memory being freed will have an BlockLink_t structure immediately
+         * before it. */
+        puc -= xHeapStructSize;
+
+        /* This casting is to keep the compiler from issuing warnings. */
+        pxLink = ( void * ) puc;
+
+        /* Check the block is actually allocated: the allocated bit must be
+         * set and the next pointer NULL (both established by pvPortMalloc).
+         * A failure here indicates a double free or a corrupted pointer. */
+        secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
+        secureportASSERT( pxLink->pxNextFreeBlock == NULL );
+
+        if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
+        {
+            if( pxLink->pxNextFreeBlock == NULL )
+            {
+                /* The block is being returned to the heap - it is no longer
+                 * allocated. */
+                pxLink->xBlockSize &= ~xBlockAllocatedBit;
+
+                secureportDISABLE_NON_SECURE_INTERRUPTS();
+                {
+                    /* Add this block to the list of free blocks. */
+                    xFreeBytesRemaining += pxLink->xBlockSize;
+                    traceFREE( pv, pxLink->xBlockSize );
+                    prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
+                }
+                secureportENABLE_NON_SECURE_INTERRUPTS();
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+}
+/*-----------------------------------------------------------*/
+
+/* Returns the number of free bytes currently in the secure heap (says
+ * nothing about fragmentation). */
+size_t xPortGetFreeHeapSize( void )
+{
+    return xFreeBytesRemaining;
+}
+/*-----------------------------------------------------------*/
+
+/* Returns the low-water mark of free heap bytes since the heap was
+ * initialised - an indicator of how close the heap has come to exhaustion. */
+size_t xPortGetMinimumEverFreeHeapSize( void )
+{
+    return xMinimumEverFreeBytesRemaining;
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM55/secure/secure_heap.h b/Source/portable/IAR/ARM_CM55/secure/secure_heap.h
new file mode 100644
index 0000000..75c9cb0
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/secure/secure_heap.h
@@ -0,0 +1,66 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_HEAP_H__
+#define __SECURE_HEAP_H__
+
+/* Standard includes. */
+#include <stdlib.h>
+
+/**
+ * @brief Allocates memory from heap.
+ *
+ * @param[in] xWantedSize The size of the memory to be allocated.
+ *
+ * @return Pointer to the memory region if the allocation is successful, NULL
+ * otherwise.
+ */
+void * pvPortMalloc( size_t xWantedSize );
+
+/**
+ * @brief Frees the previously allocated memory.
+ *
+ * @param[in] pv Pointer to the memory to be freed.
+ */
+void vPortFree( void * pv );
+
+/**
+ * @brief Get the free heap size.
+ *
+ * @return Free heap size.
+ */
+size_t xPortGetFreeHeapSize( void );
+
+/**
+ * @brief Get the minimum ever free heap size.
+ *
+ * @return Minimum ever free heap size.
+ */
+size_t xPortGetMinimumEverFreeHeapSize( void );
+
+#endif /* __SECURE_HEAP_H__ */
diff --git a/Source/portable/IAR/ARM_CM55/secure/secure_init.c b/Source/portable/IAR/ARM_CM55/secure/secure_init.c
new file mode 100644
index 0000000..f93bfce
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/secure/secure_init.c
@@ -0,0 +1,106 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Secure init includes. */
+#include "secure_init.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */
+#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL )
+#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS )
+#define secureinitSCB_AIRCR_PRIS_POS ( 14UL )
+#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS )
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define secureinitFPCCR_LSPENS_POS ( 29UL )
+#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS )
+#define secureinitFPCCR_TS_POS ( 26UL )
+#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS )
+
+#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */
+#define secureinitNSACR_CP10_POS ( 10UL )
+#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS )
+#define secureinitNSACR_CP11_POS ( 11UL )
+#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS )
+/*-----------------------------------------------------------*/
+
+/* Sets the AIRCR.PRIS bit (with the required 0x05FA VECTKEY write key) so
+ * that non-secure exceptions are de-prioritized relative to secure ones,
+ * ensuring the non-secure PendSV used for context switching runs at the
+ * lowest effective priority.  No-op when called from Thread Mode. */
+secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void )
+{
+    uint32_t ulIPSR;
+
+    /* Read the Interrupt Program Status Register (IPSR) value. */
+    secureportREAD_IPSR( ulIPSR );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode. */
+    if( ulIPSR != 0 )
+    {
+        *( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) |
+                                   ( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) |
+                                   ( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK );
+    }
+}
+/*-----------------------------------------------------------*/
+
+/* Grants the non-secure side access to the FPU (NSACR.CP10/CP11), makes
+ * FPCCR.LSPEN writable from the non-secure state (LSPENS = 0), and sets
+ * FPCCR.TS so callee-saved FP registers are stacked/restored on exception
+ * entry/return and secure FP state cannot leak.  No-op from Thread Mode. */
+secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void )
+{
+    uint32_t ulIPSR;
+
+    /* Read the Interrupt Program Status Register (IPSR) value. */
+    secureportREAD_IPSR( ulIPSR );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode. */
+    if( ulIPSR != 0 )
+    {
+        /* CP10 = 1 ==> Non-secure access to the Floating Point Unit is
+         * permitted. CP11 should be programmed to the same value as CP10. */
+        *( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK );
+
+        /* LSPENS = 0 ==> LSPEN is writable from non-secure state. This ensures
+         * that we can enable/disable lazy stacking in port.c file. */
+        *( secureinitFPCCR ) &= ~( secureinitFPCCR_LSPENS_MASK );
+
+        /* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP
+         * registers (S16-S31) are also pushed to stack on exception entry and
+         * restored on exception return. */
+        *( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK );
+    }
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM55/secure/secure_init.h b/Source/portable/IAR/ARM_CM55/secure/secure_init.h
new file mode 100644
index 0000000..e6c9da0
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/secure/secure_init.h
@@ -0,0 +1,54 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_INIT_H__
+#define __SECURE_INIT_H__
+
+/**
+ * @brief De-prioritizes the non-secure exceptions.
+ *
+ * This is needed to ensure that the non-secure PendSV runs at the lowest
+ * priority. Context switch is done in the non-secure PendSV handler.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureInit_DePrioritizeNSExceptions( void );
+
+/**
+ * @brief Sets up the Floating Point Unit (FPU) for Non-Secure access.
+ *
+ * Also sets FPCCR.TS=1 to ensure that the content of the Floating Point
+ * Registers are not leaked to the non-secure side.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureInit_EnableNSFPUAccess( void );
+
+#endif /* __SECURE_INIT_H__ */
diff --git a/Source/portable/IAR/ARM_CM55/secure/secure_port_macros.h b/Source/portable/IAR/ARM_CM55/secure/secure_port_macros.h
new file mode 100644
index 0000000..d7ac583
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55/secure/secure_port_macros.h
@@ -0,0 +1,140 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_PORT_MACROS_H__
+#define __SECURE_PORT_MACROS_H__
+
+/**
+ * @brief Byte alignment requirements.
+ */
+#define secureportBYTE_ALIGNMENT 8
+#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 )
+
+/**
+ * @brief Macro to declare a function as non-secure callable.
+ */
+#if defined( __IAR_SYSTEMS_ICC__ )
+ #define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry __root
+#else
+ #define secureportNON_SECURE_CALLABLE __attribute__( ( cmse_nonsecure_entry ) ) __attribute__( ( used ) )
+#endif
+
+/**
+ * @brief Set the secure PRIMASK value.
+ */
+#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \
+ __asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" )
+
+/**
+ * @brief Set the non-secure PRIMASK value.
+ */
+#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \
+ __asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" )
+
+/**
+ * @brief Read the PSP value in the given variable.
+ */
+#define secureportREAD_PSP( pucOutCurrentStackPointer ) \
+ __asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) )
+
+/**
+ * @brief Set the PSP to the given value.
+ */
+#define secureportSET_PSP( pucCurrentStackPointer ) \
+ __asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) )
+
+/**
+ * @brief Read the PSPLIM value in the given variable.
+ */
+#define secureportREAD_PSPLIM( pucOutStackLimit ) \
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pucOutStackLimit ) )
+
+/**
+ * @brief Set the PSPLIM to the given value.
+ */
+#define secureportSET_PSPLIM( pucStackLimit ) \
+ __asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) )
+
+/**
+ * @brief Set the NonSecure MSP to the given value.
+ */
+#define secureportSET_MSP_NS( pucMainStackPointer ) \
+ __asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) )
+
+/**
+ * @brief Set the CONTROL register to the given value.
+ */
+#define secureportSET_CONTROL( ulControl ) \
+ __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" )
+
+/**
+ * @brief Read the Interrupt Program Status Register (IPSR) value in the given
+ * variable.
+ */
+#define secureportREAD_IPSR( ulIPSR ) \
+ __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) )
+
+/**
+ * @brief PRIMASK value to enable interrupts.
+ */
+#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0
+
+/**
+ * @brief PRIMASK value to disable interrupts.
+ */
+#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1
+
+/**
+ * @brief Disable secure interrupts.
+ */
+#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Disable non-secure interrupts.
+ *
+ * This effectively disables context switches.
+ */
+#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Enable non-secure interrupts.
+ */
+#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Assert definition.
+ */
+#define secureportASSERT( x ) \
+ if( ( x ) == 0 ) \
+ { \
+ secureportDISABLE_SECURE_INTERRUPTS(); \
+ secureportDISABLE_NON_SECURE_INTERRUPTS(); \
+ for( ; ; ) {; } \
+ }
+
+#endif /* __SECURE_PORT_MACROS_H__ */
diff --git a/Source/portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S b/Source/portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..ef180bd
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1336 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+#include "mpu_syscall_numbers.h"
+
+/* Default to the v2 MPU wrappers when the application's FreeRTOSConfig.h does
+ * not explicitly select the v1 wrappers. */
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* Every MPU_* entry point in this file follows the same shim pattern:
+ *  - r0 (the first argument) is saved, CONTROL is read, and bit 0 (nPRIV)
+ *    is tested to see whether the caller is privileged.
+ *  - Privileged callers (nPRIV == 0) restore r0 and branch straight to the
+ *    kernel implementation (MPU_*Impl).
+ *  - Unprivileged callers restore r0 and raise an SVC carrying the
+ *    function's system call number, so the SVC handler can run the call
+ *    with raised privilege. There is no instruction after the svc; the SVC
+ *    handler redirects execution to the Impl routine and back. */
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskDelayUntil
+/*-----------------------------------------------------------*/
+
+/* Task API system-call shims. Each routine applies the common pattern
+ * described above MPU_xTaskDelayUntil: privileged callers branch to the
+ * Impl routine; unprivileged callers raise the function's SVC number. */
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskAbortDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskPriorityGet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_eTaskGetState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskGetInfo
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSuspend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskResume
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetTickCount
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTaskGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetSystemState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetSchedulerState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetTimeOutState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskCheckForTimeOut
+/*-----------------------------------------------------------*/
+
+/* The *Entry variants below are the public symbols for APIs whose C-level
+ * wrapper dispatches through an extra entry function; the internal labels
+ * keep the un-suffixed names. The shim logic is identical. */
+ PUBLIC MPU_xTaskGenericNotifyEntry
+MPU_xTaskGenericNotifyEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotify
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWaitEntry
+MPU_xTaskGenericNotifyWaitEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyWait
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
+/*-----------------------------------------------------------*/
+
+/* Queue API system-call shims -- same privilege-check pattern as the task
+ * wrappers above (see the comment above MPU_xTaskDelayUntil). */
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGenericSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueMessagesWaiting
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueuePeek
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSemaphoreTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGetMutexHolder
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueTakeMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGiveMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSelectFromSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueAddToSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueAddToRegistry
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueUnregisterQueue
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcQueueGetName
+/*-----------------------------------------------------------*/
+
+/* Timer API system-call shims -- same privilege-check pattern as the task
+ * wrappers above (see the comment above MPU_xTaskDelayUntil). */
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTimerGetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerIsTimerActive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
+/*-----------------------------------------------------------*/
+
+/* Unlike the other shims, this one first reads IPSR: a non-zero IPSR means
+ * the caller is an exception handler, which always takes the privileged
+ * path (an SVC cannot be raised from an ISR of higher/equal priority).
+ * Note the branch sense is inverted relative to the other shims: beq (Z set,
+ * i.e. CONTROL.nPRIV clear) selects the privileged path. */
+ PUBLIC MPU_xTimerGenericCommandEntry
+MPU_xTimerGenericCommandEntry:
+ push {r0}
+ /* This function can be called from ISR also and therefore, we need a check
+ * to take privileged path, if called from ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGenericCommand
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandPrivImpl
+
+/*-----------------------------------------------------------*/
+
+/* Remaining timer API shims -- common privilege-check pattern (see the
+ * comment above MPU_xTaskDelayUntil). */
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTimerGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetPeriod
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetExpiryTime
+/*-----------------------------------------------------------*/
+
+/* Event group API shims -- common privilege-check pattern (see the comment
+ * above MPU_xTaskDelayUntil). */
+ PUBLIC MPU_xEventGroupWaitBitsEntry
+MPU_xEventGroupWaitBitsEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupWaitBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupClearBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSetBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSync
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxEventGroupGetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vEventGroupSetNumber
+/*-----------------------------------------------------------*/
+
+/* Stream buffer API shims -- common privilege-check pattern (see the
+ * comment above MPU_xTaskDelayUntil). */
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsFull
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsEmpty
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferBytesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+; Default (weak) implementations of the MPU_*Impl system call symbols.
+; Each stub is exported PUBWEAK and branches to itself, so execution is
+; trapped in an infinite loop if one of them is ever reached; a strong
+; definition provided elsewhere (the real kernel implementation) replaces
+; the stub at link time.
+	PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+	b MPU_ulTaskGetIdleRunTimePercentImpl
+
+	PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+	b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+	PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+	b MPU_vTaskSetApplicationTaskTagImpl
+
+	PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+	b MPU_xTaskGetApplicationTaskTagImpl
+
+	PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+	b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+	PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+	b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+	PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+	b MPU_uxTaskGetSystemStateImpl
+
+	PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+	b MPU_uxTaskGetStackHighWaterMarkImpl
+
+	PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+	b MPU_uxTaskGetStackHighWaterMark2Impl
+
+	PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+	b MPU_xTaskGetCurrentTaskHandleImpl
+
+	PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+	b MPU_xTaskGetSchedulerStateImpl
+
+	PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+	b MPU_vTaskSetTimeOutStateImpl
+
+	PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+	b MPU_xTaskCheckForTimeOutImpl
+
+	PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+	b MPU_xTaskGenericNotifyImpl
+
+	PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+	b MPU_xTaskGenericNotifyWaitImpl
+
+	PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+	b MPU_ulTaskGenericNotifyTakeImpl
+
+	PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+	b MPU_xTaskGenericNotifyStateClearImpl
+
+	PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+	b MPU_ulTaskGenericNotifyValueClearImpl
+
+	PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+	b MPU_xQueueGenericSendImpl
+
+	PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+	b MPU_uxQueueMessagesWaitingImpl
+
+	PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+	b MPU_uxQueueSpacesAvailableImpl
+
+	PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+	b MPU_xQueueReceiveImpl
+
+	PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+	b MPU_xQueuePeekImpl
+
+	PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+	b MPU_xQueueSemaphoreTakeImpl
+
+	PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+	b MPU_xQueueGetMutexHolderImpl
+
+	PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+	b MPU_xQueueTakeMutexRecursiveImpl
+
+	PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+	b MPU_xQueueGiveMutexRecursiveImpl
+
+	PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+	b MPU_xQueueSelectFromSetImpl
+
+	PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+	b MPU_xQueueAddToSetImpl
+
+	PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+	b MPU_vQueueAddToRegistryImpl
+
+	PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+	b MPU_vQueueUnregisterQueueImpl
+
+	PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+	b MPU_pcQueueGetNameImpl
+
+	PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+	b MPU_pvTimerGetTimerIDImpl
+
+	PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+	b MPU_vTimerSetTimerIDImpl
+
+	PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+	b MPU_xTimerIsTimerActiveImpl
+
+	PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+	b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+	PUBWEAK MPU_xTimerGenericCommandPrivImpl
+MPU_xTimerGenericCommandPrivImpl:
+	b MPU_xTimerGenericCommandPrivImpl
+
+	PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+	b MPU_pcTimerGetNameImpl
+
+	PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+	b MPU_vTimerSetReloadModeImpl
+
+	PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+	b MPU_xTimerGetReloadModeImpl
+
+	PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+	b MPU_uxTimerGetReloadModeImpl
+
+	PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+	b MPU_xTimerGetPeriodImpl
+
+	PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+	b MPU_xTimerGetExpiryTimeImpl
+
+	PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+	b MPU_xEventGroupWaitBitsImpl
+
+	PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+	b MPU_xEventGroupClearBitsImpl
+
+	PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+	b MPU_xEventGroupSetBitsImpl
+
+	PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+	b MPU_xEventGroupSyncImpl
+
+	PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+	b MPU_uxEventGroupGetNumberImpl
+
+	PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+	b MPU_vEventGroupSetNumberImpl
+
+	PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+	b MPU_xStreamBufferSendImpl
+
+	PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+	b MPU_xStreamBufferReceiveImpl
+
+	PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+	b MPU_xStreamBufferIsFullImpl
+
+	PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+	b MPU_xStreamBufferIsEmptyImpl
+
+	PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+	b MPU_xStreamBufferSpacesAvailableImpl
+
+	PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+	b MPU_xStreamBufferBytesAvailableImpl
+
+	PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+	b MPU_xStreamBufferSetTriggerLevelImpl
+
+	PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+	b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+	END
diff --git a/Source/portable/IAR/ARM_CM55_NTZ/non_secure/port.c b/Source/portable/IAR/ARM_CM55_NTZ/non_secure/port.c
new file mode 100644
index 0000000..9712ac3
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55_NTZ/non_secure/port.c
@@ -0,0 +1,2043 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+/* MPU includes. */
+#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
+
+/* Portasm includes. */
+#include "portasm.h"
+
+#if ( configENABLE_TRUSTZONE == 1 )
+    /* Secure components includes. */
+    #include "secure_context.h"
+    #include "secure_init.h"
+#endif /* configENABLE_TRUSTZONE */
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/**
+ * The FreeRTOS Cortex M55 port can be configured to run on the Secure Side only
+ * i.e. the processor boots as secure and never jumps to the non-secure side.
+ * The Trust Zone support in the port must be disabled in order to run FreeRTOS
+ * on the secure side. The following are the valid configuration settings:
+ *
+ * 1. Run FreeRTOS on the Secure Side:
+ *    configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
+ *
+ * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
+ *    configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
+ *
+ * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
+ *    configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
+ */
+#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
+    #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the NVIC.
+ *
+ * SHPR3 holds the PendSV (bits 23:16) and SysTick (bits 31:24) priority
+ * fields - see portNVIC_PENDSV_PRI / portNVIC_SYSTICK_PRI below.
+ */
+#define portNVIC_SYSTICK_CTRL_REG             ( *( ( volatile uint32_t * ) 0xe000e010 ) )
+#define portNVIC_SYSTICK_LOAD_REG             ( *( ( volatile uint32_t * ) 0xe000e014 ) )
+#define portNVIC_SYSTICK_CURRENT_VALUE_REG    ( *( ( volatile uint32_t * ) 0xe000e018 ) )
+#define portNVIC_SHPR3_REG                    ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
+#define portNVIC_SYSTICK_ENABLE_BIT           ( 1UL << 0UL )
+#define portNVIC_SYSTICK_INT_BIT              ( 1UL << 1UL )
+#define portNVIC_SYSTICK_CLK_BIT              ( 1UL << 2UL )
+#define portNVIC_SYSTICK_COUNT_FLAG_BIT       ( 1UL << 16UL )
+#define portNVIC_PEND_SYSTICK_CLEAR_BIT       ( 1UL << 25UL )
+#define portNVIC_PEND_SYSTICK_SET_BIT         ( 1UL << 26UL )
+#define portMIN_INTERRUPT_PRIORITY            ( 255UL )
+#define portNVIC_PENDSV_PRI                   ( portMIN_INTERRUPT_PRIORITY << 16UL )
+#define portNVIC_SYSTICK_PRI                  ( portMIN_INTERRUPT_PRIORITY << 24UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define portSCB_SYS_HANDLER_CTRL_STATE_REG    ( *( volatile uint32_t * ) 0xe000ed24 )
+#define portSCB_MEM_FAULT_ENABLE_BIT          ( 1UL << 16UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG                    ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER       ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16       ( 0xE000E3F0 )
+#define portAIRCR_REG                         ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE                   ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS                 ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK               ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT                    ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK              ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK    ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define portCPACR               ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
+#define portCPACR_CP10_VALUE    ( 3UL )
+#define portCPACR_CP11_VALUE    portCPACR_CP10_VALUE
+#define portCPACR_CP10_POS      ( 20UL )
+#define portCPACR_CP11_POS      ( 22UL )
+
+#define portFPCCR               ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define portFPCCR_ASPEN_POS     ( 31UL )
+#define portFPCCR_ASPEN_MASK    ( 1UL << portFPCCR_ASPEN_POS )
+#define portFPCCR_LSPEN_POS     ( 30UL )
+#define portFPCCR_LSPEN_MASK    ( 1UL << portFPCCR_LSPEN_POS )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR     ( 5 )
+#define portOFFSET_TO_PC     ( 6 )
+#define portOFFSET_TO_PSR    ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the MPU.
+ */
+#define portMPU_TYPE_REG                        ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
+#define portMPU_CTRL_REG                        ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
+#define portMPU_RNR_REG                         ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
+
+#define portMPU_RBAR_REG                        ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
+#define portMPU_RLAR_REG                        ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
+
+/* NOTE(review): the _A1.._A3 registers appear to be the architectural RBAR/RLAR
+ * alias registers used to program consecutive MPU regions without rewriting
+ * RNR - confirm against the ARMv8-M Architecture Reference Manual. */
+#define portMPU_RBAR_A1_REG                     ( *( ( volatile uint32_t * ) 0xe000eda4 ) )
+#define portMPU_RLAR_A1_REG                     ( *( ( volatile uint32_t * ) 0xe000eda8 ) )
+
+#define portMPU_RBAR_A2_REG                     ( *( ( volatile uint32_t * ) 0xe000edac ) )
+#define portMPU_RLAR_A2_REG                     ( *( ( volatile uint32_t * ) 0xe000edb0 ) )
+
+#define portMPU_RBAR_A3_REG                     ( *( ( volatile uint32_t * ) 0xe000edb4 ) )
+#define portMPU_RLAR_A3_REG                     ( *( ( volatile uint32_t * ) 0xe000edb8 ) )
+
+/* MAIR0 holds memory attributes 0-3 and MAIR1 holds attributes 4-7, one byte
+ * each - hence the ATTR4..ATTR7 positions below repeat 0/8/16/24. */
+#define portMPU_MAIR0_REG                       ( *( ( volatile uint32_t * ) 0xe000edc0 ) )
+#define portMPU_MAIR1_REG                       ( *( ( volatile uint32_t * ) 0xe000edc4 ) )
+
+#define portMPU_RBAR_ADDRESS_MASK               ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RLAR_ADDRESS_MASK               ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK    ( 3UL << 1UL )
+
+#define portMPU_MAIR_ATTR0_POS                  ( 0UL )
+#define portMPU_MAIR_ATTR0_MASK                 ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR1_POS                  ( 8UL )
+#define portMPU_MAIR_ATTR1_MASK                 ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR2_POS                  ( 16UL )
+#define portMPU_MAIR_ATTR2_MASK                 ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR3_POS                  ( 24UL )
+#define portMPU_MAIR_ATTR3_MASK                 ( 0xff000000 )
+
+#define portMPU_MAIR_ATTR4_POS                  ( 0UL )
+#define portMPU_MAIR_ATTR4_MASK                 ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR5_POS                  ( 8UL )
+#define portMPU_MAIR_ATTR5_MASK                 ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR6_POS                  ( 16UL )
+#define portMPU_MAIR_ATTR6_MASK                 ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR7_POS                  ( 24UL )
+#define portMPU_MAIR_ATTR7_MASK                 ( 0xff000000 )
+
+#define portMPU_RLAR_ATTR_INDEX0                ( 0UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX1                ( 1UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX2                ( 2UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX3                ( 3UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX4                ( 4UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX5                ( 5UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX6                ( 6UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX7                ( 7UL << 1UL )
+
+#define portMPU_RLAR_REGION_ENABLE              ( 1UL )
+
+/* Enable privileged access to unmapped region. */
+#define portMPU_PRIV_BACKGROUND_ENABLE_BIT      ( 1UL << 2UL )
+
+/* Enable MPU. */
+#define portMPU_ENABLE_BIT                      ( 1UL << 0UL )
+
+/* Expected value of the portMPU_TYPE register. */
+#define portEXPECTED_MPU_TYPE_VALUE             ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+    ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+    ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range (inclusive)? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+    ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions?
+ * The right-hand accessRequest is parenthesized too, so a macro argument
+ * containing an operator (e.g. a | b) cannot change the precedence of the
+ * comparison (CERT C PRE01-C: parenthesize every macro argument use). */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+    ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX    ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b )    ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The maximum 24-bit number.
+ *
+ * It is needed because the systick is a 24-bit counter.
+ */
+#define portMAX_24_BIT_NUMBER    ( 0xffffffUL )
+
+/**
+ * @brief A fiddle factor to estimate the number of SysTick counts that would
+ * have occurred while the SysTick counter is stopped during tickless idle
+ * calculations.
+ */
+#define portMISSED_COUNTS_FACTOR    ( 94UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to set up the initial stack.
+ */
+#define portINITIAL_XPSR    ( 0x01000000 )
+
+#if ( configRUN_FREERTOS_SECURE_ONLY == 1 )
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ *     FF         FF         FF         FD
+ * 1111 1111  1111 1111  1111 1111  1111 1101
+ *
+ * Bit[6] - 1 --> The exception was taken from the Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 1 --> The exception was taken to the Secure state.
+ */
+    #define portINITIAL_EXC_RETURN    ( 0xfffffffd )
+#else
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ *     FF         FF         FF         BC
+ * 1111 1111  1111 1111  1111 1111  1011 1100
+ *
+ * Bit[6] - 0 --> The exception was taken from the Non-Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 0 --> The exception was taken to the Non-Secure state.
+ */
+    #define portINITIAL_EXC_RETURN    ( 0xffffffbc )
+#endif /* configRUN_FREERTOS_SECURE_ONLY */
+
+/**
+ * @brief CONTROL register privileged bit mask.
+ *
+ * Bit[0] in CONTROL register tells the privilege:
+ * Bit[0] = 0 ==> The task is privileged.
+ * Bit[0] = 1 ==> The task is not privileged.
+ */
+#define portCONTROL_PRIVILEGED_MASK         ( 1UL << 0UL )
+
+/**
+ * @brief Initial CONTROL register values.
+ *
+ * Bit[0] (nPRIV) distinguishes the two values, as described by
+ * portCONTROL_PRIVILEGED_MASK above. NOTE(review): both values also set
+ * Bit[1] - presumably SPSEL = 1 so thread mode uses the process stack
+ * pointer; confirm against the context restore code in portasm.
+ */
+#define portINITIAL_CONTROL_UNPRIVILEGED    ( 0x3 )
+#define portINITIAL_CONTROL_PRIVILEGED      ( 0x2 )
+
+/**
+ * @brief Let the user override the default SysTick clock rate. If defined by the
+ * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
+ * configuration register.
+ */
+#ifndef configSYSTICK_CLOCK_HZ
+    #define configSYSTICK_CLOCK_HZ             ( configCPU_CLOCK_HZ )
+    /* Ensure the SysTick is clocked at the same frequency as the core. */
+    #define portNVIC_SYSTICK_CLK_BIT_CONFIG    ( portNVIC_SYSTICK_CLK_BIT )
+#else
+    /* Select the option to clock SysTick not at the same frequency as the core. */
+    #define portNVIC_SYSTICK_CLK_BIT_CONFIG    ( 0 )
+#endif
+
+/**
+ * @brief Let the user override the pre-loading of the initial LR with the
+ * address of prvTaskExitError() in case it messes up unwinding of the stack
+ * in the debugger.
+ */
+#ifdef configTASK_RETURN_ADDRESS
+    #define portTASK_RETURN_ADDRESS    configTASK_RETURN_ADDRESS
+#else
+    #define portTASK_RETURN_ADDRESS    prvTaskExitError
+#endif
+
+/**
+ * @brief If portPRELOAD_REGISTERS then registers will be given an initial value
+ * when a task is created. This helps in debugging at the cost of code size.
+ */
+#define portPRELOAD_REGISTERS    1
+
+/**
+ * @brief A task is created without a secure context, and must call
+ * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes
+ * any secure calls.
+ */
+#define portNO_SECURE_CONTEXT    0
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Used to catch tasks that attempt to return from their implementing
+ * function.
+ */
+static void prvTaskExitError( void );
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+    static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Setup the Memory Protection Unit (MPU).
+ */
+    static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_FPU == 1 )
+
+/**
+ * @brief Setup the Floating Point Unit (FPU).
+ */
+    static void prvSetupFPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_FPU */
+
+/**
+ * @brief Setup the timer to generate the tick interrupts.
+ *
+ * The implementation in this file is weak to allow application writers to
+ * change the timer used to generate the tick interrupt.
+ */
+void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether the current execution context is an interrupt.
+ *
+ * @return pdTRUE if the current execution context is an interrupt, pdFALSE
+ * otherwise.
+ */
+BaseType_t xPortIsInsideInterrupt( void );
+
+/**
+ * @brief Yield the processor.
+ */
+void vPortYield( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enter critical section.
+ */
+void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Exit from critical section.
+ */
+void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SysTick handler.
+ */
+void SysTick_Handler( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief C part of SVC handler.
+ */
+portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+    void vSystemCallEnter( uint32_t * pulTaskStack,
+                           uint32_t ulLR,
+                           uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+    void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /**
+     * @brief Sets up the task stack so that upon returning from
+     * SVC, the task stack is used again.
+     *
+     * @param pulSystemCallStack The current SP when the SVC was raised.
+     * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+     */
+    void vSystemCallExit( uint32_t * pulSystemCallStack,
+                          uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+    /**
+     * @brief Checks whether or not the calling task is privileged.
+     *
+     * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+     */
+    BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+    PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
+/**
+ * @brief Each task maintains its own interrupt status in the critical nesting
+ * variable.
+ *
+ * NOTE(review): initialised to an obviously invalid value (0xaaaaaaaa);
+ * presumably reset to 0 when the scheduler starts so that use before then is
+ * detectable - confirm against the scheduler start code in portasm.
+ */
+PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Saved as part of the task context to indicate which context the
+ * task is using on the secure side.
+ */
+    PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
+#endif /* configENABLE_TRUSTZONE */
+
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+    static uint8_t ucMaxSysCallPriority = 0;
+    static uint32_t ulMaxPRIGROUPValue = 0;
+    static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+
+/**
+ * @brief The number of SysTick increments that make up one tick period.
+ */
+    PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;
+
+/**
+ * @brief The maximum number of tick periods that can be suppressed is
+ * limited by the 24 bit resolution of the SysTick timer.
+ */
+    PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;
+
+/**
+ * @brief Compensate for the CPU cycles that pass while the SysTick is
+ * stopped (low power functionality only).
+ */
+    PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+
+    /**
+     * @brief Tickless-idle implementation: stops the SysTick, sleeps for up to
+     * xExpectedIdleTime tick periods (bounded by the 24-bit SysTick counter),
+     * then restarts the SysTick and steps the kernel's tick count by the number
+     * of whole tick periods that elapsed. Defined weak so the application can
+     * supply its own low power implementation.
+     *
+     * The exact order of the register accesses below is significant - do not
+     * reorder them.
+     */
+    __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
+    {
+        uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
+        TickType_t xModifiableIdleTime;
+
+        /* Make sure the SysTick reload value does not overflow the counter. */
+        if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
+        {
+            xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
+        }
+
+        /* Enter a critical section but don't use the taskENTER_CRITICAL()
+         * method as that will mask interrupts that should exit sleep mode. */
+        __asm volatile ( "cpsid i" ::: "memory" );
+        __asm volatile ( "dsb" );
+        __asm volatile ( "isb" );
+
+        /* If a context switch is pending or a task is waiting for the scheduler
+         * to be unsuspended then abandon the low power entry. */
+        if( eTaskConfirmSleepModeStatus() == eAbortSleep )
+        {
+            /* Re-enable interrupts - see comments above the cpsid instruction
+             * above. */
+            __asm volatile ( "cpsie i" ::: "memory" );
+        }
+        else
+        {
+            /* Stop the SysTick momentarily. The time the SysTick is stopped for
+             * is accounted for as best it can be, but using the tickless mode will
+             * inevitably result in some tiny drift of the time maintained by the
+             * kernel with respect to calendar time. */
+            portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+            /* Use the SysTick current-value register to determine the number of
+             * SysTick decrements remaining until the next tick interrupt. If the
+             * current-value register is zero, then there are actually
+             * ulTimerCountsForOneTick decrements remaining, not zero, because the
+             * SysTick requests the interrupt when decrementing from 1 to 0. */
+            ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+
+            if( ulSysTickDecrementsLeft == 0 )
+            {
+                ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
+            }
+
+            /* Calculate the reload value required to wait xExpectedIdleTime
+             * tick periods. -1 is used because this code normally executes part
+             * way through the first tick period. But if the SysTick IRQ is now
+             * pending, then clear the IRQ, suppressing the first tick, and correct
+             * the reload value to reflect that the second tick period is already
+             * underway. The expected idle time is always at least two ticks. */
+            ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
+
+            if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
+            {
+                portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
+                ulReloadValue -= ulTimerCountsForOneTick;
+            }
+
+            if( ulReloadValue > ulStoppedTimerCompensation )
+            {
+                ulReloadValue -= ulStoppedTimerCompensation;
+            }
+
+            /* Set the new reload value. */
+            portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
+
+            /* Clear the SysTick count flag and set the count value back to
+             * zero. */
+            portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+            /* Restart SysTick. */
+            portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
+
+            /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
+             * set its parameter to 0 to indicate that its implementation contains
+             * its own wait for interrupt or wait for event instruction, and so wfi
+             * should not be executed again. However, the original expected idle
+             * time variable must remain unmodified, so a copy is taken. */
+            xModifiableIdleTime = xExpectedIdleTime;
+            configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
+
+            if( xModifiableIdleTime > 0 )
+            {
+                __asm volatile ( "dsb" ::: "memory" );
+                __asm volatile ( "wfi" );
+                __asm volatile ( "isb" );
+            }
+
+            configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
+
+            /* Re-enable interrupts to allow the interrupt that brought the MCU
+             * out of sleep mode to execute immediately. See comments above
+             * the cpsid instruction above. */
+            __asm volatile ( "cpsie i" ::: "memory" );
+            __asm volatile ( "dsb" );
+            __asm volatile ( "isb" );
+
+            /* Disable interrupts again because the clock is about to be stopped
+             * and interrupts that execute while the clock is stopped will increase
+             * any slippage between the time maintained by the RTOS and calendar
+             * time. */
+            __asm volatile ( "cpsid i" ::: "memory" );
+            __asm volatile ( "dsb" );
+            __asm volatile ( "isb" );
+
+            /* Disable the SysTick clock without reading the
+             * portNVIC_SYSTICK_CTRL_REG register to ensure the
+             * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
+             * the time the SysTick is stopped for is accounted for as best it can
+             * be, but using the tickless mode will inevitably result in some tiny
+             * drift of the time maintained by the kernel with respect to calendar
+             * time*/
+            portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+            /* Determine whether the SysTick has already counted to zero. */
+            if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+            {
+                uint32_t ulCalculatedLoadValue;
+
+                /* The tick interrupt ended the sleep (or is now pending), and
+                 * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG
+                 * with whatever remains of the new tick period. */
+                ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
+
+                /* Don't allow a tiny value, or values that have somehow
+                 * underflowed because the post sleep hook did something
+                 * that took too long or because the SysTick current-value register
+                 * is zero. */
+                if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
+                {
+                    ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
+                }
+
+                portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
+
+                /* As the pending tick will be processed as soon as this
+                 * function exits, the tick value maintained by the tick is stepped
+                 * forward by one less than the time spent waiting. */
+                ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
+            }
+            else
+            {
+                /* Something other than the tick interrupt ended the sleep. */
+
+                /* Use the SysTick current-value register to determine the
+                 * number of SysTick decrements remaining until the expected idle
+                 * time would have ended. */
+                ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+                #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
+                {
+                    /* If the SysTick is not using the core clock, the current-
+                     * value register might still be zero here. In that case, the
+                     * SysTick didn't load from the reload register, and there are
+                     * ulReloadValue decrements remaining in the expected idle
+                     * time, not zero. */
+                    if( ulSysTickDecrementsLeft == 0 )
+                    {
+                        ulSysTickDecrementsLeft = ulReloadValue;
+                    }
+                }
+                #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+                /* Work out how long the sleep lasted rounded to complete tick
+                 * periods (not the ulReload value which accounted for part
+                 * ticks). */
+                ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
+
+                /* How many complete tick periods passed while the processor
+                 * was waiting? */
+                ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
+
+                /* The reload value is set to whatever fraction of a single tick
+                 * period remains. */
+                portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
+            }
+
+            /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
+             * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If
+             * the SysTick is not using the core clock, temporarily configure it to
+             * use the core clock. This configuration forces the SysTick to load
+             * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
+             * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready
+             * to receive the standard value immediately. */
+            portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+            portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+            #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
+            {
+                portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+            }
+            #else
+            {
+                /* The temporary usage of the core clock has served its purpose,
+                 * as described above. Resume usage of the other clock. */
+                portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;
+
+                if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+                {
+                    /* The partial tick period already ended. Be sure the SysTick
+                     * counts it only once. */
+                    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
+                }
+
+                portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+                portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+            }
+            #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+            /* Step the tick to account for any tick periods that elapsed. */
+            vTaskStepTick( ulCompleteTickPeriods );
+
+            /* Exit with interrupts enabled. */
+            __asm volatile ( "cpsie i" ::: "memory" );
+        }
+    }
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
/* Configure the SysTick timer to generate the RTOS tick interrupt at
 * configTICK_RATE_HZ.  Declared weak so an application can override it to
 * generate the tick from a different timer.  When tickless idle is enabled,
 * this also pre-computes the constants used to stop and restart the SysTick
 * around low power periods. */
__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
{
    /* Calculate the constants required to configure the tick interrupt. */
    #if ( configUSE_TICKLESS_IDLE == 1 )
    {
        /* SysTick decrements per RTOS tick period. */
        ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );

        /* The SysTick is a 24-bit counter (portMAX_24_BIT_NUMBER), which
         * bounds how many whole tick periods can be suppressed in one sleep. */
        xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;

        /* Compensation applied for the counts lost while the timer is
         * stopped during tickless entry. */
        ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
    }
    #endif /* configUSE_TICKLESS_IDLE */

    /* Stop and reset the SysTick. */
    portNVIC_SYSTICK_CTRL_REG = 0UL;
    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;

    /* Configure SysTick to interrupt at the requested rate. */
    portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
    portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
}
+/*-----------------------------------------------------------*/
+
+static void prvTaskExitError( void )
+{
+ volatile uint32_t ulDummy = 0UL;
+
+ /* A function that implements a task must not exit or attempt to return to
+ * its caller as there is nothing to return to. If a task wants to exit it
+ * should instead call vTaskDelete( NULL ). Artificially force an assert()
+ * to be triggered if configASSERT() is defined, then stop here so
+ * application writers can catch the error. */
+ configASSERT( ulCriticalNesting == ~0UL );
+ portDISABLE_INTERRUPTS();
+
+ while( ulDummy == 0 )
+ {
+ /* This file calls prvTaskExitError() after the scheduler has been
+ * started to remove a compiler warning about the function being
+ * defined but never called. ulDummy is used purely to quieten other
+ * warnings about code appearing after this function is called - making
+ * ulDummy volatile makes the compiler think the function could return
+ * and therefore not output an 'unreachable code' warning for code that
+ * appears after it. */
+ }
+}
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
    static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
    {
        /* Translate the access-permission field of an MPU RBAR value into
         * the task-level tskMPU_* permission flags.  Any other permission
         * encoding maps to 0 (no unprivileged access). */
        const uint32_t ulAccessField = ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK );
        uint32_t ulAccessPermissions = 0;

        if( ulAccessField == portMPU_REGION_READ_ONLY )
        {
            ulAccessPermissions = tskMPU_READ_PERMISSION;
        }
        else if( ulAccessField == portMPU_REGION_READ_WRITE )
        {
            ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
        }

        return ulAccessPermissions;
    }
#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
    /* Program the fixed, kernel-owned MPU regions and enable the MPU.
     * Four regions are configured: privileged flash (kernel code),
     * unprivileged flash (application code), the system-call flash
     * section, and the privileged SRAM holding kernel data.  The region
     * boundary symbols come from the linker script (or from code when
     * built with Arm Compiler). */
    static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
    {
        #if defined( __ARMCC_VERSION )
            /* Declaration when these variable are defined in code instead of being
             * exported from linker scripts. */
            extern uint32_t * __privileged_functions_start__;
            extern uint32_t * __privileged_functions_end__;
            extern uint32_t * __syscalls_flash_start__;
            extern uint32_t * __syscalls_flash_end__;
            extern uint32_t * __unprivileged_flash_start__;
            extern uint32_t * __unprivileged_flash_end__;
            extern uint32_t * __privileged_sram_start__;
            extern uint32_t * __privileged_sram_end__;
        #else /* if defined( __ARMCC_VERSION ) */
            /* Declaration when these variable are exported from linker scripts. */
            extern uint32_t __privileged_functions_start__[];
            extern uint32_t __privileged_functions_end__[];
            extern uint32_t __syscalls_flash_start__[];
            extern uint32_t __syscalls_flash_end__[];
            extern uint32_t __unprivileged_flash_start__[];
            extern uint32_t __unprivileged_flash_end__[];
            extern uint32_t __privileged_sram_start__[];
            extern uint32_t __privileged_sram_end__[];
        #endif /* defined( __ARMCC_VERSION ) */

        /* The only permitted number of regions are 8 or 16. */
        configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) );

        /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */
        configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );

        /* Check that the MPU is present. */
        if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
        {
            /* MAIR0 - Index 0.  Memory attribute used for flash/SRAM regions. */
            portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
            /* MAIR0 - Index 1.  Device memory attribute (not referenced by the
             * regions configured below, which all use attribute index 0). */
            portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );

            /* Setup privileged flash as Read Only so that privileged tasks can
             * read it but not modify. */
            portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION;
            portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
                               ( portMPU_REGION_NON_SHAREABLE ) |
                               ( portMPU_REGION_PRIVILEGED_READ_ONLY );
            portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
                               ( portMPU_RLAR_ATTR_INDEX0 ) |
                               ( portMPU_RLAR_REGION_ENABLE );

            /* Setup unprivileged flash as Read Only by both privileged and
             * unprivileged tasks. All tasks can read it but no-one can modify. */
            portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION;
            portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
                               ( portMPU_REGION_NON_SHAREABLE ) |
                               ( portMPU_REGION_READ_ONLY );
            portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
                               ( portMPU_RLAR_ATTR_INDEX0 ) |
                               ( portMPU_RLAR_REGION_ENABLE );

            /* Setup unprivileged syscalls flash as Read Only by both privileged
             * and unprivileged tasks. All tasks can read it but no-one can modify. */
            portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION;
            portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
                               ( portMPU_REGION_NON_SHAREABLE ) |
                               ( portMPU_REGION_READ_ONLY );
            portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
                               ( portMPU_RLAR_ATTR_INDEX0 ) |
                               ( portMPU_RLAR_REGION_ENABLE );

            /* Setup RAM containing kernel data for privileged access only.
             * Execute-never prevents running code from kernel data RAM. */
            portMPU_RNR_REG = portPRIVILEGED_RAM_REGION;
            portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
                               ( portMPU_REGION_NON_SHAREABLE ) |
                               ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
                               ( portMPU_REGION_EXECUTE_NEVER );
            portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
                               ( portMPU_RLAR_ATTR_INDEX0 ) |
                               ( portMPU_RLAR_REGION_ENABLE );

            /* Enable mem fault. */
            portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT;

            /* Enable MPU with privileged background access i.e. unmapped
             * regions have privileged access. */
            portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
        }
    }
#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_FPU == 1 )
    /* Grant full (privileged and unprivileged) access to the FPU and enable
     * automatic plus lazy stacking of the floating point context on
     * exception entry/exit. */
    static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */
    {
        #if ( configENABLE_TRUSTZONE == 1 )
        {
            /* Enable non-secure access to the FPU. */
            SecureInit_EnableNSFPUAccess();
        }
        #endif /* configENABLE_TRUSTZONE */

        /* CP10 = 11 ==> Full access to FPU i.e. both privileged and
         * unprivileged code should be able to access FPU. CP11 should be
         * programmed to the same value as CP10. */
        *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) |
                            ( portCPACR_CP11_VALUE << portCPACR_CP11_POS )
                            );

        /* ASPEN = 1 ==> Hardware should automatically preserve floating point
         * context on exception entry and restore on exception return.
         * LSPEN = 1 ==> Enable lazy context save of FP state. */
        *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK );
    }
#endif /* configENABLE_FPU */
+/*-----------------------------------------------------------*/
+
/* Request a context switch.  The switch itself is performed by the PendSV
 * handler, which runs once no higher priority exception is active. */
void vPortYield( void ) /* PRIVILEGED_FUNCTION */
{
    /* Set a PendSV to request a context switch. */
    portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;

    /* Barriers are normally not required but do ensure the code is
     * completely within the specified behaviour for the architecture. */
    __asm volatile ( "dsb" ::: "memory" );
    __asm volatile ( "isb" );
}
+/*-----------------------------------------------------------*/
+
/* Enter a critical section.  Critical sections nest: interrupts are masked
 * on every entry, and only unmasked again by the matching outermost call to
 * vPortExitCritical(). */
void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
{
    portDISABLE_INTERRUPTS();
    ulCriticalNesting++;

    /* Barriers are normally not required but do ensure the code is
     * completely within the specified behaviour for the architecture. */
    __asm volatile ( "dsb" ::: "memory" );
    __asm volatile ( "isb" );
}
+/*-----------------------------------------------------------*/
+
/* Exit a critical section previously entered with vPortEnterCritical(). */
void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
{
    /* Calling this without a matching vPortEnterCritical() is a usage
     * error - the nesting count would underflow. */
    configASSERT( ulCriticalNesting );
    ulCriticalNesting--;

    /* Only re-enable interrupts when leaving the outermost nested
     * critical section. */
    if( ulCriticalNesting == 0 )
    {
        portENABLE_INTERRUPTS();
    }
}
+/*-----------------------------------------------------------*/
+
/* SysTick interrupt handler - advances the RTOS tick and pends a context
 * switch when the tick unblocked a task that should now run. */
void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
{
    uint32_t ulPreviousMask;

    /* Protect the tick increment from interrupts that use FreeRTOS APIs;
     * the previous mask is restored afterwards. */
    ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Increment the RTOS tick. */
        if( xTaskIncrementTick() != pdFALSE )
        {
            /* Pend a context switch. */
            portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
}
+/*-----------------------------------------------------------*/
+
/* C portion of the SVC handler, called from the assembly SVC entry with a
 * pointer to the exception stack frame of the code that raised the SVC.
 * Decodes the SVC number from the SVC instruction itself and dispatches:
 * secure context allocation/free (TrustZone builds), starting the scheduler,
 * and - with MPU wrappers v1 - raising privilege for system calls.  Any
 * other SVC number triggers configASSERT(). */
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
        #if defined( __ARMCC_VERSION )
            /* Declaration when these variable are defined in code instead of being
             * exported from linker scripts. */
            extern uint32_t * __syscalls_flash_start__;
            extern uint32_t * __syscalls_flash_end__;
        #else
            /* Declaration when these variable are exported from linker scripts. */
            extern uint32_t __syscalls_flash_start__[];
            extern uint32_t __syscalls_flash_end__[];
        #endif /* defined( __ARMCC_VERSION ) */
    #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */

    uint32_t ulPC;

    #if ( configENABLE_TRUSTZONE == 1 )
        uint32_t ulR0, ulR1;
        extern TaskHandle_t pxCurrentTCB;
        #if ( configENABLE_MPU == 1 )
            uint32_t ulControl, ulIsTaskPrivileged;
        #endif /* configENABLE_MPU */
    #endif /* configENABLE_TRUSTZONE */
    uint8_t ucSVCNumber;

    /* Register are stored on the stack in the following order - R0, R1, R2, R3,
     * R12, LR, PC, xPSR. */
    ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
    /* The SVC number is the immediate operand of the SVC instruction, i.e.
     * the byte located two bytes before the stacked return address. */
    ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];

    switch( ucSVCNumber )
    {
        #if ( configENABLE_TRUSTZONE == 1 )
            case portSVC_ALLOCATE_SECURE_CONTEXT:

                /* R0 contains the stack size passed as parameter to the
                 * vPortAllocateSecureContext function. */
                ulR0 = pulCallerStackAddress[ 0 ];

                #if ( configENABLE_MPU == 1 )
                {
                    /* Read the CONTROL register value. */
                    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

                    /* The task that raised the SVC is privileged if Bit[0]
                     * in the CONTROL register is 0. */
                    ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 );

                    /* Allocate and load a context for the secure task. */
                    xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB );
                }
                #else /* if ( configENABLE_MPU == 1 ) */
                {
                    /* Allocate and load a context for the secure task. */
                    xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB );
                }
                #endif /* configENABLE_MPU */

                configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID );
                SecureContext_LoadContext( xSecureContext, pxCurrentTCB );
                break;

            case portSVC_FREE_SECURE_CONTEXT:

                /* R0 contains TCB being freed and R1 contains the secure
                 * context handle to be freed. */
                ulR0 = pulCallerStackAddress[ 0 ];
                ulR1 = pulCallerStackAddress[ 1 ];

                /* Free the secure context. */
                SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 );
                break;
        #endif /* configENABLE_TRUSTZONE */

        case portSVC_START_SCHEDULER:
            #if ( configENABLE_TRUSTZONE == 1 )
            {
                /* De-prioritize the non-secure exceptions so that the
                 * non-secure pendSV runs at the lowest priority. */
                SecureInit_DePrioritizeNSExceptions();

                /* Initialize the secure context management system. */
                SecureContext_Init();
            }
            #endif /* configENABLE_TRUSTZONE */

            #if ( configENABLE_FPU == 1 )
            {
                /* Setup the Floating Point Unit (FPU). */
                prvSetupFPU();
            }
            #endif /* configENABLE_FPU */

            /* Setup the context of the first task so that the first task starts
             * executing. */
            vRestoreContextOfFirstTask();
            break;

        #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
            case portSVC_RAISE_PRIVILEGE:

                /* Only raise the privilege, if the svc was raised from any of
                 * the system calls. */
                if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
                    ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
                {
                    vRaisePrivilege();
                }
                break;
        #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */

        default:
            /* Incorrect SVC call. */
            configASSERT( pdFALSE );
    }
}
+/*-----------------------------------------------------------*/
+
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

    /* Called from the SVC handler when a task enters an MPU-protected system
     * call (MPU wrappers v2).  After validating the request, it copies the
     * exception stack frame onto the task's dedicated system call stack,
     * raises privilege, and redirects execution to the kernel-side
     * implementation of the requested API.  The matching unwind is done by
     * vSystemCallExit(). */
    void vSystemCallEnter( uint32_t * pulTaskStack,
                           uint32_t ulLR,
                           uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
    {
        extern TaskHandle_t pxCurrentTCB;
        extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
        xMPU_SETTINGS * pxMpuSettings;
        uint32_t * pulSystemCallStack;
        uint32_t ulStackFrameSize, ulSystemCallLocation, i;

        #if defined( __ARMCC_VERSION )
            /* Declaration when these variable are defined in code instead of being
             * exported from linker scripts. */
            extern uint32_t * __syscalls_flash_start__;
            extern uint32_t * __syscalls_flash_end__;
        #else
            /* Declaration when these variable are exported from linker scripts. */
            extern uint32_t __syscalls_flash_start__[];
            extern uint32_t __syscalls_flash_end__[];
        #endif /* #if defined( __ARMCC_VERSION ) */

        ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );

        /* Checks:
         * 1. SVC is raised from the system call section (i.e. application is
         *    not raising SVC directly).
         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
         *    it is non-NULL only during the execution of a system call (i.e.
         *    between system call enter and exit).
         * 3. System call is not for a kernel API disabled by the configuration
         *    in FreeRTOSConfig.h.
         * 4. We do not need to check that ucSystemCallNumber is within range
         *    because the assembly SVC handler checks that before calling
         *    this function.
         */
        if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
            ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
            ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
        {
            pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;

            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
            {
                /* Bit[4] of EXC_RETURN distinguishes standard (8 word) from
                 * extended (26 word) stack frames. */
                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
                {
                    /* Extended frame i.e. FPU in use. */
                    ulStackFrameSize = 26;
                    __asm volatile
                    (
                        " vpush {s0}         \n" /* Trigger lazy stacking. */
                        " vpop  {s0}         \n" /* Nullify the affect of the above instruction. */
                        ::: "memory"
                    );
                }
                else
                {
                    /* Standard frame i.e. FPU not in use. */
                    ulStackFrameSize = 8;
                }
            }
            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
            {
                ulStackFrameSize = 8;
            }
            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */

            /* Make space on the system call stack for the stack frame. */
            pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;

            /* Copy the stack frame. */
            for( i = 0; i < ulStackFrameSize; i++ )
            {
                pulSystemCallStack[ i ] = pulTaskStack[ i ];
            }

            /* Store the value of the Link Register before the SVC was raised.
             * It contains the address of the caller of the System Call entry
             * point (i.e. the caller of the MPU_<API>). We need to restore it
             * when we exit from the system call. */
            pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];

            /* Store the value of the PSPLIM register before the SVC was raised.
             * We need to restore it when we exit from the system call. */
            __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );

            /* Use the pulSystemCallStack in thread mode. */
            __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );

            /* Start executing the system call upon returning from this handler. */
            pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];

            /* Raise a request to exit from the system call upon finishing the
             * system call. */
            pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;

            /* Remember the location where we should copy the stack frame when we exit from
             * the system call. */
            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;

            /* Record if the hardware used padding to force the stack pointer
             * to be double word aligned. */
            if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
            {
                pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
            }
            else
            {
                pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
            }

            /* We ensure in pxPortInitialiseStack that the system call stack is
             * double word aligned and therefore, there is no need of padding.
             * Clear the bit[9] of stacked xPSR. */
            pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );

            /* Raise the privilege for the duration of the system call. */
            __asm volatile
            (
                " mrs r0, control     \n" /* Obtain current control value. */
                " movs r1, #1         \n" /* r1 = 1. */
                " bics r0, r1         \n" /* Clear nPRIV bit. */
                " msr control, r0     \n" /* Write back new control value. */
                ::: "r0", "r1", "memory"
            );
        }
    }

#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

    /* Target of the LR written into the system call stack frame by
     * vSystemCallEnter(): when the kernel-side system call implementation
     * returns, it returns here, which raises the SVC that causes
     * vSystemCallExit() to restore the caller's context and drop privilege. */
    void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
    {
        __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
    }

#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

    /* Called from the SVC handler when a system call completes (the SVC is
     * raised by vRequestSystemCallExit()).  Copies the stack frame back from
     * the system call stack to the task stack, restores the task's LR and
     * PSPLIM saved by vSystemCallEnter(), and drops privilege before
     * returning to thread mode. */
    void vSystemCallExit( uint32_t * pulSystemCallStack,
                          uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
    {
        extern TaskHandle_t pxCurrentTCB;
        xMPU_SETTINGS * pxMpuSettings;
        uint32_t * pulTaskStack;
        uint32_t ulStackFrameSize, ulSystemCallLocation, i;

        #if defined( __ARMCC_VERSION )
            /* Declaration when these variable are defined in code instead of being
             * exported from linker scripts. */
            extern uint32_t * __privileged_functions_start__;
            extern uint32_t * __privileged_functions_end__;
        #else
            /* Declaration when these variable are exported from linker scripts. */
            extern uint32_t __privileged_functions_start__[];
            extern uint32_t __privileged_functions_end__[];
        #endif /* #if defined( __ARMCC_VERSION ) */

        ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );

        /* Checks:
         * 1. SVC is raised from the privileged code (i.e. application is not
         *    raising SVC directly). This SVC is only raised from
         *    vRequestSystemCallExit which is in the privileged code section.
         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
         *    this means that we previously entered a system call and the
         *    application is not attempting to exit without entering a system
         *    call.
         */
        if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
            ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
        {
            pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;

            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
            {
                /* Bit[4] of EXC_RETURN distinguishes standard (8 word) from
                 * extended (26 word) stack frames. */
                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
                {
                    /* Extended frame i.e. FPU in use. */
                    ulStackFrameSize = 26;
                    __asm volatile
                    (
                        " vpush {s0}         \n" /* Trigger lazy stacking. */
                        " vpop  {s0}         \n" /* Nullify the affect of the above instruction. */
                        ::: "memory"
                    );
                }
                else
                {
                    /* Standard frame i.e. FPU not in use. */
                    ulStackFrameSize = 8;
                }
            }
            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
            {
                ulStackFrameSize = 8;
            }
            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */

            /* Make space on the task stack for the stack frame. */
            pulTaskStack = pulTaskStack - ulStackFrameSize;

            /* Copy the stack frame. */
            for( i = 0; i < ulStackFrameSize; i++ )
            {
                pulTaskStack[ i ] = pulSystemCallStack[ i ];
            }

            /* Use the pulTaskStack in thread mode. */
            __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );

            /* Return to the caller of the System Call entry point (i.e. the
             * caller of the MPU_<API>). */
            pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
            /* Ensure that LR has a valid value.*/
            pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;

            /* Restore the PSPLIM register to what it was at the time of
             * system call entry. */
            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );

            /* If the hardware used padding to force the stack pointer
             * to be double word aligned, set the stacked xPSR bit[9],
             * otherwise clear it. */
            if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
            {
                pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
            }
            else
            {
                pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
            }

            /* This is not NULL only for the duration of the system call. */
            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;

            /* Drop the privilege before returning to the thread mode. */
            __asm volatile
            (
                " mrs r0, control     \n" /* Obtain current control value. */
                " movs r1, #1         \n" /* r1 = 1. */
                " orrs r0, r1         \n" /* Set nPRIV bit. */
                " msr control, r0     \n" /* Write back new control value. */
                ::: "r0", "r1", "memory"
            );
        }
    }

#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )

    BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
    {
        /* Query the calling task's MPU settings and report whether its
         * privileged flag is set. */
        const xMPU_SETTINGS * pxCallingTaskSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
        BaseType_t xResult;

        xResult = ( ( pxCallingTaskSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) ? pdTRUE : pdFALSE;

        return xResult;
    }

#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )

    /* Initialise a new task's context.  With the MPU enabled the context is
     * kept inside the task's xMPU_SETTINGS structure rather than on the task
     * stack: registers R0-R12, LR, PC, xPSR, then (optionally) the secure
     * context handle, followed by PSP, PSPLIM, CONTROL and EXC_RETURN.
     * Returns the address immediately after the last initialised context
     * word.  The dummy register values (0x04040404 for r4 etc.) aid
     * debugging. */
    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
                                         StackType_t * pxEndOfStack,
                                         TaskFunction_t pxCode,
                                         void * pvParameters,
                                         BaseType_t xRunPrivileged,
                                         xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
    {
        uint32_t ulIndex = 0;

        xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
        ulIndex++;

        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
        ulIndex++;

        #if ( configENABLE_TRUSTZONE == 1 )
        {
            xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
            ulIndex++;
        }
        #endif /* configENABLE_TRUSTZONE */
        /* PSP points below the 8-word hardware-saved exception frame. */
        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
        ulIndex++;
        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
        ulIndex++;
        /* The initial CONTROL value selects the task's privilege level. */
        if( xRunPrivileged == pdTRUE )
        {
            xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
            ulIndex++;
        }
        else
        {
            xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
            ulIndex++;
        }
        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
        ulIndex++;

        #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
        {
            /* Ensure that the system call stack is double word aligned. */
            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
                                                                                     ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );

            /* The stack limit is rounded UP so it stays inside the buffer. */
            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
                                                                                            ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
                                                                                          ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );

            /* This is not NULL only for the duration of a system call. */
            xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
        }
        #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */

        return &( xMPUSettings->ulContext[ ulIndex ] );
    }

#else /* configENABLE_MPU */

    /* Initialise a new task's stack so it looks exactly as if the task had
     * been interrupted - i.e. as a stack frame the context restore code can
     * unwind.  Returns the new top of stack. */
    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
                                         StackType_t * pxEndOfStack,
                                         TaskFunction_t pxCode,
                                         void * pvParameters ) /* PRIVILEGED_FUNCTION */
    {
        /* Simulate the stack frame as it would be created by a context switch
         * interrupt. */
        #if ( portPRELOAD_REGISTERS == 0 )
        {
            pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
            *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
            pxTopOfStack -= 5; /* Skip over the R12, R3, R2 and R1 slots to reach the R0 slot. */
            *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
            pxTopOfStack -= 9; /* Skip over the R11..R4 slots to reach the EXC_RETURN slot. */
            *pxTopOfStack = portINITIAL_EXC_RETURN;
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */

            #if ( configENABLE_TRUSTZONE == 1 )
            {
                pxTopOfStack--;
                *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
            }
            #endif /* configENABLE_TRUSTZONE */
        }
        #else /* portPRELOAD_REGISTERS */
        {
            /* Same frame as above but every register slot is written with a
             * recognisable dummy value to aid debugging. */
            pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
            *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
            pxTopOfStack--;
            *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
            pxTopOfStack--;
            *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */

            #if ( configENABLE_TRUSTZONE == 1 )
            {
                pxTopOfStack--;
                *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
            }
            #endif /* configENABLE_TRUSTZONE */
        }
        #endif /* portPRELOAD_REGISTERS */

        return pxTopOfStack;
    }

#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Prepare the hardware to run the kernel and start the first task. When
+ * configASSERT is defined and the port uses BASEPRI, first probe the NVIC to
+ * validate that configMAX_SYSCALL_INTERRUPT_PRIORITY is usable with the
+ * number of priority bits the hardware implements. Only returns on failure. */
+BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+    {
+        volatile uint32_t ulOriginalPriority;
+        volatile uint32_t ulImplementedPrioBits = 0;
+        volatile uint8_t ucMaxPriorityValue;
+
+        /* Determine the maximum priority from which ISR safe FreeRTOS API
+         * functions can be called. ISR safe functions are those that end in
+         * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+         * ensure interrupt entry is as fast and simple as possible.
+         *
+         * Save the interrupt priority value that is about to be clobbered. */
+        ulOriginalPriority = portNVIC_SHPR2_REG;
+
+        /* Determine the number of priority bits available. First write to all
+         * possible bits. */
+        portNVIC_SHPR2_REG = 0xFF000000;
+
+        /* Read the value back to see how many bits stuck. */
+        ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+        /* Use the same mask on the maximum system call priority. */
+        ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+        /* Check that the maximum system call priority is nonzero after
+         * accounting for the number of priority bits supported by the
+         * hardware. A priority of 0 is invalid because setting the BASEPRI
+         * register to 0 unmasks all interrupts, and interrupts with priority 0
+         * cannot be masked using BASEPRI.
+         * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+        configASSERT( ucMaxSysCallPriority );
+
+        /* Check that the bits not implemented in hardware are zero in
+         * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+        configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+        /* Calculate the maximum acceptable priority group value for the number
+         * of bits read back. */
+
+        /* Count how many priority bits "stuck" by shifting left until the
+         * top bit of the read-back value clears. */
+        while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+        {
+            ulImplementedPrioBits++;
+            ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+        }
+
+        if( ulImplementedPrioBits == 8 )
+        {
+            /* When the hardware implements 8 priority bits, there is no way for
+             * the software to configure PRIGROUP to not have sub-priorities. As
+             * a result, the least significant bit is always used for sub-priority
+             * and there are 128 preemption priorities and 2 sub-priorities.
+             *
+             * This may cause some confusion in some cases - for example, if
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+             * priority interrupts will be masked in Critical Sections as those
+             * are at the same preemption priority. This may appear confusing as
+             * 4 is higher (numerically lower) priority than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+             * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+             * to 4, this confusion does not happen and the behaviour remains the same.
+             *
+             * The following assert ensures that the sub-priority bit in the
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+             * confusion. */
+            configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+            ulMaxPRIGROUPValue = 0;
+        }
+        else
+        {
+            ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+        }
+
+        /* Shift the priority group value back to its position within the AIRCR
+         * register. */
+        ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+        ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+        /* Restore the clobbered interrupt priority register to its original
+         * value. */
+        portNVIC_SHPR2_REG = ulOriginalPriority;
+    }
+    #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+    /* Make PendSV, CallSV and SysTick the same priority as the kernel. */
+    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
+    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
+
+    #if ( configENABLE_MPU == 1 )
+    {
+        /* Setup the Memory Protection Unit (MPU). */
+        prvSetupMPU();
+    }
+    #endif /* configENABLE_MPU */
+
+    /* Start the timer that generates the tick ISR. Interrupts are disabled
+     * here already. */
+    vPortSetupTimerInterrupt();
+
+    /* Initialize the critical nesting count ready for the first task. */
+    ulCriticalNesting = 0;
+
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+    {
+        /* From this point on, kernel object access checks consult each
+         * task's access control list rather than granting access freely. */
+        xSchedulerRunning = pdTRUE;
+    }
+    #endif
+
+    /* Start the first task. */
+    vStartFirstTask();
+
+    /* Should never get here as the tasks will now be executing. Call the task
+     * exit error function to prevent compiler warnings about a static function
+     * not being called in the case that the application writer overrides this
+     * functionality by defining configTASK_RETURN_ADDRESS. Call
+     * vTaskSwitchContext() so link time optimization does not remove the
+     * symbol. */
+    vTaskSwitchContext();
+    prvTaskExitError();
+
+    /* Should not get here. */
+    return 0;
+}
+/*-----------------------------------------------------------*/
+
+/* The scheduler cannot be stopped on this port - there is no environment to
+ * return to - so a call to this function is treated as an error: the
+ * deliberately false condition below trips configASSERT when it is defined. */
+void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Not implemented in ports where there is nothing to return to.
+     * Artificially force an assert. */
+    configASSERT( ulCriticalNesting == 1000UL );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+    /* Translate the generic memory region definitions in xRegions (plus the
+     * task stack, when ulStackDepth > 0) into ARMv8-M MPU register values -
+     * MAIR0 and per-region RBAR/RLAR pairs - stored in xMPUSettings. */
+    void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
+                                    const struct xMEMORY_REGION * const xRegions,
+                                    StackType_t * pxBottomOfStack,
+                                    uint32_t ulStackDepth )
+    {
+        uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber;
+        int32_t lIndex = 0;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_sram_start__;
+            extern uint32_t * __privileged_sram_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __privileged_sram_start__[];
+            extern uint32_t __privileged_sram_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+
+        /* Setup MAIR0: attribute index 0 = normal memory, index 1 = device
+         * memory. Regions select an index via their RLAR value below. */
+        xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+        xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+        /* This function is called automatically when the task is created - in
+         * which case the stack region parameters will be valid. At all other
+         * times the stack parameters will not be valid and it is assumed that
+         * the stack region has already been configured. */
+        if( ulStackDepth > 0 )
+        {
+            ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+            ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1;
+
+            /* If the stack is within the privileged SRAM, do not protect it
+             * using a separate MPU region. This is needed because privileged
+             * SRAM is already protected using an MPU region and ARMv8-M does
+             * not allow overlapping MPU regions. */
+            if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) &&
+                ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) )
+            {
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0;
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0;
+            }
+            else
+            {
+                /* Define the region that allows access to the stack. */
+                ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) |
+                                                             ( portMPU_REGION_NON_SHAREABLE ) |
+                                                             ( portMPU_REGION_READ_WRITE ) |
+                                                             ( portMPU_REGION_EXECUTE_NEVER );
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) |
+                                                             ( portMPU_RLAR_ATTR_INDEX0 ) |
+                                                             ( portMPU_RLAR_REGION_ENABLE );
+            }
+        }
+
+        /* User supplied configurable regions. */
+        for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ )
+        {
+            /* If xRegions is NULL i.e. the task has not specified any MPU
+             * region, the else part ensures that all the configurable MPU
+             * regions are invalidated. */
+            if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) )
+            {
+                /* Translate the generic region definition contained in xRegions
+                 * into the ARMv8 specific MPU settings that are then stored in
+                 * xMPUSettings. */
+                ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                /* Start address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) |
+                                                                          ( portMPU_REGION_NON_SHAREABLE );
+
+                /* RO/RW. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY );
+                }
+                else
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE );
+                }
+
+                /* XN. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER );
+                }
+
+                /* End Address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) |
+                                                                          ( portMPU_RLAR_REGION_ENABLE );
+
+                /* Normal memory/ Device memory. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
+                {
+                    /* Attr1 in MAIR0 is configured as device memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1;
+                }
+                else
+                {
+                    /* Attr0 in MAIR0 is configured as normal memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0;
+                }
+            }
+            else
+            {
+                /* Invalidate the region. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL;
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL;
+            }
+
+            lIndex++;
+        }
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+    /* Return pdTRUE if the calling task may access the buffer
+     * [pvBuffer, pvBuffer + ulBufferLength) with the requested access type:
+     * privileged tasks always may; unprivileged tasks may only when the whole
+     * buffer lies inside one of their enabled MPU regions whose permissions
+     * authorize ulAccessRequested. Returns pdFALSE otherwise (including when
+     * computing the buffer end address would overflow). */
+    BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+                                                uint32_t ulBufferLength,
+                                                uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+    {
+        uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+        BaseType_t xAccessGranted = pdFALSE;
+        const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+        if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+        {
+            xAccessGranted = pdTRUE;
+        }
+        else
+        {
+            if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+            {
+                ulBufferStartAddress = ( uint32_t ) pvBuffer;
+                ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+                for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+                {
+                    /* Is the MPU region enabled? */
+                    if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+                    {
+                        /* Both the first and the last byte of the buffer must
+                         * fall within this region, and the region's access
+                         * permissions must cover the requested access. */
+                        if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+                            portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+                            portIS_AUTHORIZED( ulAccessRequested,
+                                               prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+                        {
+                            xAccessGranted = pdTRUE;
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+
+        return xAccessGranted;
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Return pdTRUE when called from Handler mode (i.e. from an exception or
+ * interrupt handler) and pdFALSE when called from Thread mode. */
+BaseType_t xPortIsInsideInterrupt( void )
+{
+    uint32_t ulIpsrValue;
+
+    /* The Interrupt Program Status Register (IPSR) reads as zero in Thread
+     * mode and holds the active exception number inside a handler. */
+    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIpsrValue )::"memory" );
+
+    return ( ulIpsrValue == 0 ) ? pdFALSE : pdTRUE;
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+    /* Assert that the currently executing interrupt's priority allows it to
+     * use ISR safe ("FromISR") FreeRTOS API functions, and that all NVIC
+     * priority bits are configured as preemption (group) bits. */
+    void vPortValidateInterruptPriority( void )
+    {
+        uint32_t ulCurrentInterrupt;
+        uint8_t ucCurrentPriority;
+
+        /* Obtain the number of the currently executing interrupt. */
+        __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+        /* Is the interrupt number a user defined interrupt? */
+        if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+        {
+            /* Look up the interrupt's priority. */
+            ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+            /* The following assertion will fail if a service routine (ISR) for
+             * an interrupt that has been assigned a priority above
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+             * function. ISR safe FreeRTOS API functions must *only* be called
+             * from interrupts that have been assigned a priority at or below
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Numerically low interrupt priority numbers represent logically high
+             * interrupt priorities, therefore the priority of the interrupt must
+             * be set to a value equal to or numerically *higher* than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Interrupts that use the FreeRTOS API must not be left at their
+             * default priority of zero as that is the highest possible priority,
+             * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+             * and therefore also guaranteed to be invalid.
+             *
+             * FreeRTOS maintains separate thread and ISR API functions to ensure
+             * interrupt entry is as fast and simple as possible.
+             *
+             * The following links provide detailed information:
+             * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+             * https://www.FreeRTOS.org/FAQHelp.html */
+            configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+        }
+
+        /* Priority grouping: The interrupt controller (NVIC) allows the bits
+         * that define each interrupt's priority to be split between bits that
+         * define the interrupt's pre-emption priority bits and bits that define
+         * the interrupt's sub-priority. For simplicity all bits must be defined
+         * to be pre-emption priority bits. The following assertion will fail if
+         * this is not the case (if some bits represent a sub-priority).
+         *
+         * If the application only uses CMSIS libraries for interrupt
+         * configuration then the correct setting can be achieved on all Cortex-M
+         * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+         * scheduler. Note however that some vendor specific peripheral libraries
+         * assume a non-zero priority group setting, in which cases using a value
+         * of zero will result in unpredictable behaviour. */
+        configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+    }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    /* Grant the task identified by xInternalTaskHandle access to the kernel
+     * object identified by lInternalIndexOfKernelObject by setting the
+     * corresponding bit in the task's access control list bitmap. */
+    void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                         int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        xMPU_SETTINGS * pxTaskSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+        const uint32_t ulAclWord = ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS;
+        const uint32_t ulAclBit = ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS;
+
+        pxTaskSettings->ulAccessControlList[ ulAclWord ] |= ( 1U << ulAclBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    /* Revoke the access of the task identified by xInternalTaskHandle to the
+     * kernel object identified by lInternalIndexOfKernelObject by clearing the
+     * corresponding bit in the task's access control list bitmap. */
+    void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                          int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        xMPU_SETTINGS * pxTaskSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+        const uint32_t ulAclWord = ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS;
+        const uint32_t ulAclBit = ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS;
+
+        pxTaskSettings->ulAccessControlList[ ulAclWord ] &= ~( 1U << ulAclBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+        /* Return pdTRUE if the calling task may use the kernel object
+         * identified by lInternalIndexOfKernelObject: before the scheduler
+         * starts every caller is authorized (no task context exists yet);
+         * afterwards privileged tasks are always authorized and unprivileged
+         * tasks are authorized only when the matching bit is set in their
+         * access control list. Returns pdFALSE otherwise. */
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xGranted = pdFALSE;
+            const xMPU_SETTINGS * pxMpuSettings;
+            uint32_t ulAclWord, ulAclBit;
+
+            if( xSchedulerRunning == pdFALSE )
+            {
+                /* No task is running yet, so there are no task permissions to
+                 * consult - grant access to every kernel object. */
+                xGranted = pdTRUE;
+            }
+            else
+            {
+                pxMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+                if( ( pxMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+                {
+                    /* Privileged tasks bypass the access control list. */
+                    xGranted = pdTRUE;
+                }
+                else
+                {
+                    /* Locate the bit for this object in the ACL bitmap. */
+                    ulAclWord = ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS;
+                    ulAclBit = ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS;
+
+                    if( ( pxMpuSettings->ulAccessControlList[ ulAclWord ] & ( 1U << ulAclBit ) ) != 0 )
+                    {
+                        xGranted = pdTRUE;
+                    }
+                }
+            }
+
+            return xGranted;
+        }
+
+    #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+        /* Without the Access Control List feature every task may use every
+         * kernel object, so this check unconditionally grants access. */
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            ( void ) lInternalIndexOfKernelObject;
+
+            return pdTRUE;
+        }
+
+    #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.h b/Source/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.h
new file mode 100644
index 0000000..f64ceb5
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.h
@@ -0,0 +1,114 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __PORT_ASM_H__
+#define __PORT_ASM_H__
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+
+/* MPU wrappers includes. */
+#include "mpu_wrappers.h"
+
+/**
+ * @brief Restore the context of the first task so that the first task starts
+ * executing.
+ */
+void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Raises the privilege level by clearing the bit 0 of the CONTROL
+ * register.
+ *
+ * @note This is a privileged function and should only be called from the kernel
+ * code.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vResetPrivilege( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Starts the first task.
+ */
+void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Disables interrupts.
+ */
+uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enables interrupts.
+ */
+void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief PendSV Exception handler.
+ */
+void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SVC Handler.
+ */
+void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Allocate a Secure context for the calling task.
+ *
+ * @param[in] ulSecureStackSize The size of the stack to be allocated on the
+ * secure side for the calling task.
+ */
+void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) );
+
+/**
+ * @brief Free the task's secure context.
+ *
+ * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task.
+ */
+void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* __PORT_ASM_H__ */
diff --git a/Source/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s b/Source/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s
new file mode 100644
index 0000000..00ee5a5
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s
@@ -0,0 +1,402 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+/* Including FreeRTOSConfig.h here will cause build errors if the header file
+contains code not understood by the assembler - for example the 'extern' keyword.
+To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so
+the code is included in C files but excluded by the preprocessor in assembly
+files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
+#include "FreeRTOSConfig.h"
+
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+ EXTERN pxCurrentTCB
+ EXTERN vTaskSwitchContext
+ EXTERN vPortSVCHandler_C
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallExit
+#endif
+
+ PUBLIC xIsPrivileged
+ PUBLIC vResetPrivilege
+ PUBLIC vRestoreContextOfFirstTask
+ PUBLIC vRaisePrivilege
+ PUBLIC vStartFirstTask
+ PUBLIC ulSetInterruptMask
+ PUBLIC vClearInterruptMask
+ PUBLIC PendSV_Handler
+ PUBLIC SVC_Handler
+/*-----------------------------------------------------------*/
+
+/*---------------- Unprivileged Functions -------------------*/
+
+/*-----------------------------------------------------------*/
+
+ SECTION .text:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+/* Return 1 in r0 if Thread mode is privileged (CONTROL[0] clear), else 0. */
+xIsPrivileged:
+    mrs r0, control /* r0 = CONTROL. */
+    tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+    ite ne
+    movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+    moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+    bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+/* Switch Thread mode to unprivileged by setting the nPRIV bit (CONTROL[0]). */
+vResetPrivilege:
+    mrs r0, control /* r0 = CONTROL. */
+    orr r0, r0, #1 /* r0 = r0 | 1. */
+    msr control, r0 /* CONTROL = r0. */
+    bx lr /* Return to the caller. */
+/*-----------------------------------------------------------*/
+
+/*----------------- Privileged Functions --------------------*/
+
+/*-----------------------------------------------------------*/
+
+ SECTION privileged_functions:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+/* MPU variant: program the MPU (MAIR0 and the RBAR/RLAR region registers)
+ * from the first task's TCB, then restore the task's saved context and
+ * branch into it with interrupts enabled. */
+vRestoreContextOfFirstTask:
+    program_mpu_first_task:
+        ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+        ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+        dmb /* Complete outstanding transfers before disabling MPU. */
+        ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+        ldr r2, [r1] /* Read the value of MPU_CTRL. */
+        bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+        str r2, [r1] /* Disable MPU. */
+
+        adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+        ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+        ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+        str r1, [r2] /* Program MAIR0. */
+
+        adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+        ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+        ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+        movs r3, #4 /* r3 = 4. */
+        str r3, [r1] /* Program RNR = 4. */
+        ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+        #if ( configTOTAL_MPU_REGIONS == 16 )
+            movs r3, #8 /* r3 = 8. */
+            str r3, [r1] /* Program RNR = 8. */
+            ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+            stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+            movs r3, #12 /* r3 = 12. */
+            str r3, [r1] /* Program RNR = 12. */
+            ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+            stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+        #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+        ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+        ldr r2, [r1] /* Read the value of MPU_CTRL. */
+        orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+        str r2, [r1] /* Enable MPU. */
+        dsb /* Force memory writes before continuing. */
+
+    restore_context_first_task:
+        ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+        ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+        ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+    restore_special_regs_first_task:
+        ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+        msr psp, r2
+        msr psplim, r3
+        msr control, r4
+
+    restore_general_regs_first_task:
+        ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+        stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+        ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+
+    restore_context_done_first_task:
+        str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+        mov r0, #0
+        msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+        bx lr
+
+#else /* configENABLE_MPU */
+
+/* Non-MPU variant: restore PSPLIM, EXC_RETURN and the stack pointer from the
+ * first task's stack, switch Thread mode to use PSP, then branch into the
+ * task via EXC_RETURN with interrupts enabled. */
+vRestoreContextOfFirstTask:
+    ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+    ldr r1, [r2] /* Read pxCurrentTCB. */
+    ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+
+    ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+    msr psplim, r1 /* Set this task's PSPLIM value. */
+    movs r1, #2 /* r1 = 2. */
+    msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
+    adds r0, #32 /* Discard everything up to r0. */
+    msr psp, r0 /* This is now the new top of stack to use in the task. */
+    isb
+    mov r0, #0
+    msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+    bx r2 /* Finally, branch to EXC_RETURN. */
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Switch Thread mode to privileged by clearing the nPRIV bit (CONTROL[0]). */
+vRaisePrivilege:
+    mrs r0, control /* Read the CONTROL register. */
+    bic r0, r0, #1 /* Clear the bit 0. */
+    msr control, r0 /* Write back the new CONTROL value. */
+    bx lr /* Return to the caller. */
+/*-----------------------------------------------------------*/
+
+/* Reset the MSP from the vector table's initial stack pointer entry, enable
+ * interrupts, then make the portSVC_START_SCHEDULER system call (SVC 102)
+ * whose handler starts the first task. Does not return. */
+vStartFirstTask:
+    ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
+    ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
+    ldr r0, [r0] /* The first entry in vector table is stack pointer. */
+    msr msp, r0 /* Set the MSP back to the start of the stack. */
+    cpsie i /* Globally enable interrupts. */
+    cpsie f
+    dsb
+    isb
+    svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
+/*-----------------------------------------------------------*/
+
+/* Mask interrupts at or below configMAX_SYSCALL_INTERRUPT_PRIORITY via
+ * BASEPRI and return (in r0) the previous BASEPRI value for later restore. */
+ulSetInterruptMask:
+    mrs r0, basepri /* r0 = basepri. Return original basepri value. */
+    mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+    msr basepri, r1 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+    dsb
+    isb
+    bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+/* Restore BASEPRI to the mask value passed in r0 (as previously returned by
+ * ulSetInterruptMask), re-enabling the interrupts it had masked. */
+vClearInterruptMask:
+    msr basepri, r0 /* basepri = ulMask. */
+    dsb
+    isb
+    bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */
+ mrs r2, psp /* r2 = PSP. */
+
+ save_general_regs:
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r1!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */
+ sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r1!, {r4-r11} /* Store r4-r11. */
+ ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r1!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psplim /* r3 = PSPLIM. */
+ mrs r4, control /* r4 = CONTROL. */
+ stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+    msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+    stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
+PendSV_Handler:
+ mrs r0, psp /* Read PSP in r0. */
+#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ it eq
+ vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
+#endif /* configENABLE_FPU || configENABLE_MVE */
+
+ mrs r2, psplim /* r2 = PSPLIM. */
+ mov r3, lr /* r3 = LR/EXC_RETURN. */
+    stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
+
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r2] /* Read pxCurrentTCB. */
+ str r0, [r1] /* Save the new top of stack in TCB. */
+
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+    msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r2] /* Read pxCurrentTCB. */
+ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
+
+ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
+
+#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ it eq
+ vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
+#endif /* configENABLE_FPU || configENABLE_MVE */
+
+ msr psplim, r2 /* Restore the PSPLIM register value for the task. */
+ msr psp, r0 /* Remember the new top of stack for the task. */
+ bx r3
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #NUM_SYSTEM_CALLS
+ blt syscall_enter
+ cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+ b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+ END
diff --git a/Source/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h b/Source/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h
new file mode 100644
index 0000000..15cb65e
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h
@@ -0,0 +1,85 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_MVE
+ #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE.
+#endif /* configENABLE_MVE */
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portARCH_NAME "Cortex-M55"
+#define portHAS_BASEPRI 1
+#define portDONT_DISCARD __root
+/*-----------------------------------------------------------*/
+
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portDISABLE_INTERRUPTS() ulSetInterruptMask()
+#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 )
+/*-----------------------------------------------------------*/
+
+/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in
+ * the source code because to do so would cause other compilers to generate
+ * warnings. */
+#pragma diag_suppress=Be006
+#pragma diag_suppress=Pa082
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/Source/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h
new file mode 100644
index 0000000..6f666da
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h
@@ -0,0 +1,449 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
+
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
+
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
+
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Type definitions.
+ */
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
+
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
+
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+
+/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
+ * not need to be guarded with a critical section. */
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Extern declarations.
+ */
+extern BaseType_t xPortIsInsideInterrupt( void );
+
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
+
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief MPU specific constants.
+ */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
+
+/* MPU settings that can be overridden in FreeRTOSConfig.h. */
+#ifndef configTOTAL_MPU_REGIONS
+ /* Define to 8 for backward compatibility. */
+ #define configTOTAL_MPU_REGIONS ( 8UL )
+#endif
+
+/* MPU regions. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+
+/* Device memory attributes used in MPU_MAIR registers.
+ *
+ * 8-bit values encoded as follows:
+ * Bit[7:4] - 0000 - Device Memory
+ * Bit[3:2] - 00 --> Device-nGnRnE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
+ * Bit[1:0] - 00, Reserved.
+ */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+
+/* Normal memory attributes used in MPU_MAIR registers. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+
+/* Attributes used in MPU_RBAR registers. */
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
+ * @brief SVC numbers.
+ */
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Scheduler utilities.
+ */
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Tickless idle/low power functionality.
+ */
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Task function macros as described on the FreeRTOS.org WEB site.
+ */
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Allocate a secure context for the task.
+ *
+ * Tasks are not created with a secure context. Any task that is going to call
+ * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
+ * secure context before it calls any secure function.
+ *
+ * @param[in] ulSecureStackSize The size of the secure stack to be allocated.
+ */
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+
+/**
+ * @brief Called when a task is deleted to delete the task's secure context,
+ * if it has one.
+ *
+ * @param[in] pxTCB The TCB of the task being deleted.
+ */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+ #define portIS_PRIVILEGED() xIsPrivileged()
+
+/**
+ * @brief Raise an SVC request to raise privilege.
+ *
+ * The SVC handler checks that the SVC was raised from a system call and only
+ * then it raises the privilege. If this is called from any other place,
+ * the privilege is not raised.
+ */
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Barriers.
+ */
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/IAR/ARM_CM7/ReadMe.txt b/Source/portable/IAR/ARM_CM7/ReadMe.txt
index 5ecbe81..9cc851e 100644
--- a/Source/portable/IAR/ARM_CM7/ReadMe.txt
+++ b/Source/portable/IAR/ARM_CM7/ReadMe.txt
@@ -1,8 +1,8 @@
There are two options for running FreeRTOS on ARM Cortex-M7 microcontrollers.
The best option depends on the revision of the ARM Cortex-M7 core in use. The
revision is specified by an 'r' number, and a 'p' number, so will look something
-like 'r0p1'. Check the documentation for the microcontroller in use to find the
-revision of the Cortex-M7 core used in that microcontroller. If in doubt, use
+like 'r0p1'. Check the documentation for the microcontroller in use to find the
+revision of the Cortex-M7 core used in that microcontroller. If in doubt, use
the FreeRTOS port provided specifically for r0p1 revisions, as that can be used
with all core revisions.
@@ -10,9 +10,9 @@
use the Cortex-M7 r0p1 port - the latter containing a minor errata workaround.
If the revision of the ARM Cortex-M7 core is not r0p1 then either option can be
-used, but it is recommended to use the FreeRTOS ARM Cortex-M4F port located in
+used, but it is recommended to use the FreeRTOS ARM Cortex-M4F port located in
the /FreeRTOS/Source/portable/IAR/ARM_CM4F directory.
If the revision of the ARM Cortex-M7 core is r0p1 then use the FreeRTOS ARM
Cortex-M7 r0p1 port located in the /FreeRTOS/Source/portable/IAR/ARM_CM7/r0p1
-directory.
\ No newline at end of file
+directory.
diff --git a/Source/portable/IAR/ARM_CM7/r0p1/port.c b/Source/portable/IAR/ARM_CM7/r0p1/port.c
index d360131..a04532b 100644
--- a/Source/portable/IAR/ARM_CM7/r0p1/port.c
+++ b/Source/portable/IAR/ARM_CM7/r0p1/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -59,8 +59,9 @@
#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
/* Constants required to check the validity of an interrupt priority. */
#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
@@ -233,13 +234,10 @@
*/
BaseType_t xPortStartScheduler( void )
{
- /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0.
- * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
- configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY );
-
#if ( configASSERT_DEFINED == 1 )
{
- volatile uint32_t ulOriginalPriority;
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
volatile uint8_t ucMaxPriorityValue;
@@ -249,7 +247,7 @@
* ensure interrupt entry is as fast and simple as possible.
*
* Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
/* Determine the number of priority bits available. First write to all
* possible bits. */
@@ -261,33 +259,53 @@
/* Use the same mask on the maximum system call priority. */
ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
/* Calculate the maximum acceptable priority group value for the number
* of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- ulMaxPRIGROUPValue--;
+ ulImplementedPrioBits++;
ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
- #ifdef __NVIC_PRIO_BITS
+ if( ulImplementedPrioBits == 8 )
{
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
}
- #endif
-
- #ifdef configPRIO_BITS
+ else
{
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
}
- #endif
/* Shift the priority group value back to its position within the AIRCR
* register. */
@@ -296,7 +314,7 @@
/* Restore the clobbered interrupt priority register to its original
* value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
}
#endif /* configASSERT_DEFINED */
@@ -656,10 +674,10 @@
* be set to a value equal to or numerically *higher* than
* configMAX_SYSCALL_INTERRUPT_PRIORITY.
*
- * Interrupts that use the FreeRTOS API must not be left at their
- * default priority of zero as that is the highest possible priority,
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
* which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
- * and therefore also guaranteed to be invalid.
+ * and therefore also guaranteed to be invalid.
*
* FreeRTOS maintains separate thread and ISR API functions to ensure
* interrupt entry is as fast and simple as possible.
diff --git a/Source/portable/IAR/ARM_CM7/r0p1/portasm.s b/Source/portable/IAR/ARM_CM7/r0p1/portasm.s
index 521d8b4..19cc6cd 100644
--- a/Source/portable/IAR/ARM_CM7/r0p1/portasm.s
+++ b/Source/portable/IAR/ARM_CM7/r0p1/portasm.s
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -28,125 +28,124 @@
#include <FreeRTOSConfig.h>
- RSEG CODE:CODE(2)
- thumb
+ RSEG CODE:CODE(2)
+ thumb
- EXTERN pxCurrentTCB
- EXTERN vTaskSwitchContext
+ EXTERN pxCurrentTCB
+ EXTERN vTaskSwitchContext
- PUBLIC xPortPendSVHandler
- PUBLIC vPortSVCHandler
- PUBLIC vPortStartFirstTask
- PUBLIC vPortEnableVFP
+ PUBLIC xPortPendSVHandler
+ PUBLIC vPortSVCHandler
+ PUBLIC vPortStartFirstTask
+ PUBLIC vPortEnableVFP
/*-----------------------------------------------------------*/
xPortPendSVHandler:
- mrs r0, psp
- isb
- /* Get the location of the current TCB. */
- ldr r3, =pxCurrentTCB
- ldr r2, [r3]
+ mrs r0, psp
+ isb
+ /* Get the location of the current TCB. */
+ ldr r3, =pxCurrentTCB
+ ldr r2, [r3]
- /* Is the task using the FPU context? If so, push high vfp registers. */
- tst r14, #0x10
- it eq
- vstmdbeq r0!, {s16-s31}
+ /* Is the task using the FPU context? If so, push high vfp registers. */
+ tst r14, #0x10
+ it eq
+ vstmdbeq r0!, {s16-s31}
- /* Save the core registers. */
- stmdb r0!, {r4-r11, r14}
+ /* Save the core registers. */
+ stmdb r0!, {r4-r11, r14}
- /* Save the new top of stack into the first member of the TCB. */
- str r0, [r2]
+ /* Save the new top of stack into the first member of the TCB. */
+ str r0, [r2]
- stmdb sp!, {r0, r3}
- mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
- cpsid i
- msr basepri, r0
- dsb
- isb
- cpsie i
- bl vTaskSwitchContext
- mov r0, #0
- msr basepri, r0
- ldmia sp!, {r0, r3}
+ stmdb sp!, {r0, r3}
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ cpsid i
+ msr basepri, r0
+ dsb
+ isb
+ cpsie i
+ bl vTaskSwitchContext
+ mov r0, #0
+ msr basepri, r0
+ ldmia sp!, {r0, r3}
- /* The first item in pxCurrentTCB is the task top of stack. */
- ldr r1, [r3]
- ldr r0, [r1]
+ /* The first item in pxCurrentTCB is the task top of stack. */
+ ldr r1, [r3]
+ ldr r0, [r1]
- /* Pop the core registers. */
- ldmia r0!, {r4-r11, r14}
+ /* Pop the core registers. */
+ ldmia r0!, {r4-r11, r14}
- /* Is the task using the FPU context? If so, pop the high vfp registers
- too. */
- tst r14, #0x10
- it eq
- vldmiaeq r0!, {s16-s31}
+ /* Is the task using the FPU context? If so, pop the high vfp registers
+ too. */
+ tst r14, #0x10
+ it eq
+ vldmiaeq r0!, {s16-s31}
- msr psp, r0
- isb
- #ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata */
- #if WORKAROUND_PMU_CM001 == 1
- push { r14 }
- pop { pc }
- #endif
- #endif
+ msr psp, r0
+ isb
+ #ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata */
+ #if WORKAROUND_PMU_CM001 == 1
+ push { r14 }
+ pop { pc }
+ #endif
+ #endif
- bx r14
+ bx r14
/*-----------------------------------------------------------*/
vPortSVCHandler:
- /* Get the location of the current TCB. */
- ldr r3, =pxCurrentTCB
- ldr r1, [r3]
- ldr r0, [r1]
- /* Pop the core registers. */
- ldmia r0!, {r4-r11, r14}
- msr psp, r0
- isb
- mov r0, #0
- msr basepri, r0
- bx r14
+ /* Get the location of the current TCB. */
+ ldr r3, =pxCurrentTCB
+ ldr r1, [r3]
+ ldr r0, [r1]
+ /* Pop the core registers. */
+ ldmia r0!, {r4-r11, r14}
+ msr psp, r0
+ isb
+ mov r0, #0
+ msr basepri, r0
+ bx r14
/*-----------------------------------------------------------*/
vPortStartFirstTask
- /* Use the NVIC offset register to locate the stack. */
- ldr r0, =0xE000ED08
- ldr r0, [r0]
- ldr r0, [r0]
- /* Set the msp back to the start of the stack. */
- msr msp, r0
- /* Clear the bit that indicates the FPU is in use in case the FPU was used
- before the scheduler was started - which would otherwise result in the
- unnecessary leaving of space in the SVC stack for lazy saving of FPU
- registers. */
- mov r0, #0
- msr control, r0
- /* Call SVC to start the first task. */
- cpsie i
- cpsie f
- dsb
- isb
- svc 0
+ /* Use the NVIC offset register to locate the stack. */
+ ldr r0, =0xE000ED08
+ ldr r0, [r0]
+ ldr r0, [r0]
+ /* Set the msp back to the start of the stack. */
+ msr msp, r0
+ /* Clear the bit that indicates the FPU is in use in case the FPU was used
+ before the scheduler was started - which would otherwise result in the
+ unnecessary leaving of space in the SVC stack for lazy saving of FPU
+ registers. */
+ mov r0, #0
+ msr control, r0
+ /* Call SVC to start the first task. */
+ cpsie i
+ cpsie f
+ dsb
+ isb
+ svc 0
/*-----------------------------------------------------------*/
vPortEnableVFP:
- /* The FPU enable bits are in the CPACR. */
- ldr.w r0, =0xE000ED88
- ldr r1, [r0]
+ /* The FPU enable bits are in the CPACR. */
+ ldr.w r0, =0xE000ED88
+ ldr r1, [r0]
- /* Enable CP10 and CP11 coprocessors, then save back. */
- orr r1, r1, #( 0xf << 20 )
- str r1, [r0]
- bx r14
+ /* Enable CP10 and CP11 coprocessors, then save back. */
+ orr r1, r1, #( 0xf << 20 )
+ str r1, [r0]
+ bx r14
- END
-
+ END
diff --git a/Source/portable/IAR/ARM_CM7/r0p1/portmacro.h b/Source/portable/IAR/ARM_CM7/r0p1/portmacro.h
index db95f29..f93146e 100644
--- a/Source/portable/IAR/ARM_CM7/r0p1/portmacro.h
+++ b/Source/portable/IAR/ARM_CM7/r0p1/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -29,9 +29,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*-----------------------------------------------------------
* Port specific definitions.
@@ -59,16 +61,18 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
+ #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
- #else
+ #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+ #else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
@@ -203,8 +207,10 @@
#pragma diag_suppress=Pe191
#pragma diag_suppress=Pa082
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S b/Source/portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..ef180bd
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1336 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+#include "mpu_syscall_numbers.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskDelayUntil
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskAbortDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskPriorityGet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_eTaskGetState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskGetInfo
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSuspend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskResume
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetTickCount
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTaskGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetSystemState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetSchedulerState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetTimeOutState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskCheckForTimeOut
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyEntry
+MPU_xTaskGenericNotifyEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotify
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWaitEntry
+MPU_xTaskGenericNotifyWaitEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyWait
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGenericSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueMessagesWaiting
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueuePeek
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSemaphoreTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGetMutexHolder
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueTakeMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGiveMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSelectFromSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueAddToSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueAddToRegistry
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueUnregisterQueue
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcQueueGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTimerGetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerIsTimerActive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommandEntry
+MPU_xTimerGenericCommandEntry:
+ push {r0}
+ /* This function can be called from ISR also and therefore, we need a check
+ * to take privileged path, if called from ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGenericCommand
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandPrivImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTimerGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetPeriod
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetExpiryTime
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBitsEntry
+MPU_xEventGroupWaitBitsEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupWaitBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupClearBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSetBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSync
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxEventGroupGetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vEventGroupSetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsFull
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsEmpty
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferBytesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandPrivImpl
+MPU_xTimerGenericCommandPrivImpl:
+ b MPU_xTimerGenericCommandPrivImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/Source/portable/IAR/ARM_CM85/non_secure/port.c b/Source/portable/IAR/ARM_CM85/non_secure/port.c
new file mode 100644
index 0000000..9712ac3
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/non_secure/port.c
@@ -0,0 +1,2043 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+/* MPU includes. */
+#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
+
+/* Portasm includes. */
+#include "portasm.h"
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ /* Secure components includes. */
+ #include "secure_context.h"
+ #include "secure_init.h"
+#endif /* configENABLE_TRUSTZONE */
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/**
+ * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only
+ * i.e. the processor boots as secure and never jumps to the non-secure side.
+ * The Trust Zone support in the port must be disabled in order to run FreeRTOS
+ * on the secure side. The following are the valid configuration settings:
+ *
+ * 1. Run FreeRTOS on the Secure Side:
+ * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
+ *
+ * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
+ *
+ * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
+ */
+#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
+ #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the NVIC.
+ */
+#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
+#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
+#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
+#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
+#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
+#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
+#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
+#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
+#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
+#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
+#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 )
+#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
+#define portCPACR_CP10_VALUE ( 3UL )
+#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE
+#define portCPACR_CP10_POS ( 20UL )
+#define portCPACR_CP11_POS ( 22UL )
+
+#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define portFPCCR_ASPEN_POS ( 31UL )
+#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS )
+#define portFPCCR_LSPEN_POS ( 30UL )
+#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the MPU.
+ */
+#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
+#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
+#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
+
+#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
+#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
+
+#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) )
+#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) )
+
+#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) )
+#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) )
+
+#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) )
+#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) )
+
+#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) )
+#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) )
+
+#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
+#define portMPU_MAIR_ATTR0_POS ( 0UL )
+#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR1_POS ( 8UL )
+#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR2_POS ( 16UL )
+#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR3_POS ( 24UL )
+#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 )
+
+#define portMPU_MAIR_ATTR4_POS ( 0UL )
+#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR5_POS ( 8UL )
+#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR6_POS ( 16UL )
+#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR7_POS ( 24UL )
+#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 )
+
+#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL )
+
+#define portMPU_RLAR_REGION_ENABLE ( 1UL )
+
+/* Enable privileged access to unmapped region. */
+#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL )
+
+/* Enable MPU. */
+#define portMPU_ENABLE_BIT ( 1UL << 0UL )
+
+/* Expected value of the portMPU_TYPE register. */
+#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The maximum 24-bit number.
+ *
+ * It is needed because the systick is a 24-bit counter.
+ */
+#define portMAX_24_BIT_NUMBER ( 0xffffffUL )
+
+/**
+ * @brief A fiddle factor to estimate the number of SysTick counts that would
+ * have occurred while the SysTick counter is stopped during tickless idle
+ * calculations.
+ */
+#define portMISSED_COUNTS_FACTOR ( 94UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to set up the initial stack.
+ */
+#define portINITIAL_XPSR ( 0x01000000 )
+
+#if ( configRUN_FREERTOS_SECURE_ONLY == 1 )
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF FD
+ * 1111 1111 1111 1111 1111 1111 1111 1101
+ *
+ * Bit[6] - 1 --> The exception was taken from the Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 1 --> The exception was taken to the Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xfffffffd )
+#else
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF BC
+ * 1111 1111 1111 1111 1111 1111 1011 1100
+ *
+ * Bit[6] - 0 --> The exception was taken from the Non-Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 0 --> The exception was taken to the Non-Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xffffffbc )
+#endif /* configRUN_FREERTOS_SECURE_ONLY */
+
+/**
+ * @brief CONTROL register privileged bit mask.
+ *
+ * Bit[0] in CONTROL register tells the privilege:
+ * Bit[0] = 0 ==> The task is privileged.
+ * Bit[0] = 1 ==> The task is not privileged.
+ */
+#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL )
+
+/**
+ * @brief Initial CONTROL register values.
+ */
+#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 )
+#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 )
+
+/**
+ * @brief Let the user override the default SysTick clock rate. If defined by the
+ * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
+ * configuration register.
+ */
+#ifndef configSYSTICK_CLOCK_HZ
+ #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ )
+ /* Ensure the SysTick is clocked at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT )
+#else
+ /* Select the option to clock SysTick not at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 )
+#endif
+
+/**
+ * @brief Let the user override the pre-loading of the initial LR with the
+ * address of prvTaskExitError() in case it messes up unwinding of the stack
+ * in the debugger.
+ */
+#ifdef configTASK_RETURN_ADDRESS
+ #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
+#else
+ #define portTASK_RETURN_ADDRESS prvTaskExitError
+#endif
+
+/**
+ * @brief If portPRELOAD_REGISTERS then registers will be given an initial value
+ * when a task is created. This helps in debugging at the cost of code size.
+ */
+#define portPRELOAD_REGISTERS 1
+
+/**
+ * @brief A task is created without a secure context, and must call
+ * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes
+ * any secure calls.
+ */
+#define portNO_SECURE_CONTEXT 0
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Used to catch tasks that attempt to return from their implementing
+ * function.
+ */
+static void prvTaskExitError( void );
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Setup the Memory Protection Unit (MPU).
+ */
+ static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_FPU == 1 )
+
+/**
+ * @brief Setup the Floating Point Unit (FPU).
+ */
+ static void prvSetupFPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_FPU */
+
+/**
+ * @brief Setup the timer to generate the tick interrupts.
+ *
+ * The implementation in this file is weak to allow application writers to
+ * change the timer used to generate the tick interrupt.
+ */
+void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether the current execution context is interrupt.
+ *
+ * @return pdTRUE if the current execution context is interrupt, pdFALSE
+ * otherwise.
+ */
+BaseType_t xPortIsInsideInterrupt( void );
+
+/**
+ * @brief Yield the processor.
+ */
+void vPortYield( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enter critical section.
+ */
+void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Exit from critical section.
+ */
+void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SysTick handler.
+ */
+void SysTick_Handler( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief C part of SVC handler.
+ */
+portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
+/**
+ * @brief Each task maintains its own interrupt status in the critical nesting
+ * variable.
+ */
+PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Saved as part of the task context to indicate which context the
+ * task is using on the secure side.
+ */
+ PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
+#endif /* configENABLE_TRUSTZONE */
+
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+
+/**
+ * @brief The number of SysTick increments that make up one tick period.
+ */
+ PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;
+
+/**
+ * @brief The maximum number of tick periods that can be suppressed is
+ * limited by the 24 bit resolution of the SysTick timer.
+ */
+ PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;
+
+/**
+ * @brief Compensate for the CPU cycles that pass while the SysTick is
+ * stopped (low power functionality only).
+ */
+ PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+
+    /**
+     * @brief Stop the SysTick, sleep for up to xExpectedIdleTime tick periods,
+     * then step the kernel's tick count by however many whole tick periods
+     * actually elapsed.  Declared weak so the application can supply its own
+     * low power implementation.
+     *
+     * @param xExpectedIdleTime Number of tick periods the kernel expects to
+     * remain idle (clamped below to the 24-bit SysTick limit).
+     */
+    __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
+    {
+        uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
+        TickType_t xModifiableIdleTime;
+
+        /* Make sure the SysTick reload value does not overflow the counter. */
+        if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
+        {
+            xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
+        }
+
+        /* Enter a critical section but don't use the taskENTER_CRITICAL()
+         * method as that will mask interrupts that should exit sleep mode. */
+        __asm volatile ( "cpsid i" ::: "memory" );
+        __asm volatile ( "dsb" );
+        __asm volatile ( "isb" );
+
+        /* If a context switch is pending or a task is waiting for the scheduler
+         * to be unsuspended then abandon the low power entry. */
+        if( eTaskConfirmSleepModeStatus() == eAbortSleep )
+        {
+            /* Re-enable interrupts - see comments above the cpsid instruction
+             * above. */
+            __asm volatile ( "cpsie i" ::: "memory" );
+        }
+        else
+        {
+            /* Stop the SysTick momentarily. The time the SysTick is stopped for
+             * is accounted for as best it can be, but using the tickless mode will
+             * inevitably result in some tiny drift of the time maintained by the
+             * kernel with respect to calendar time. */
+            portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+            /* Use the SysTick current-value register to determine the number of
+             * SysTick decrements remaining until the next tick interrupt. If the
+             * current-value register is zero, then there are actually
+             * ulTimerCountsForOneTick decrements remaining, not zero, because the
+             * SysTick requests the interrupt when decrementing from 1 to 0. */
+            ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+
+            if( ulSysTickDecrementsLeft == 0 )
+            {
+                ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
+            }
+
+            /* Calculate the reload value required to wait xExpectedIdleTime
+             * tick periods. -1 is used because this code normally executes part
+             * way through the first tick period. But if the SysTick IRQ is now
+             * pending, then clear the IRQ, suppressing the first tick, and correct
+             * the reload value to reflect that the second tick period is already
+             * underway. The expected idle time is always at least two ticks. */
+            ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
+
+            if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
+            {
+                portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
+                ulReloadValue -= ulTimerCountsForOneTick;
+            }
+
+            if( ulReloadValue > ulStoppedTimerCompensation )
+            {
+                ulReloadValue -= ulStoppedTimerCompensation;
+            }
+
+            /* Set the new reload value. */
+            portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
+
+            /* Clear the SysTick count flag and set the count value back to
+             * zero. */
+            portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+            /* Restart SysTick. */
+            portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
+
+            /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
+             * set its parameter to 0 to indicate that its implementation contains
+             * its own wait for interrupt or wait for event instruction, and so wfi
+             * should not be executed again. However, the original expected idle
+             * time variable must remain unmodified, so a copy is taken. */
+            xModifiableIdleTime = xExpectedIdleTime;
+            configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
+
+            if( xModifiableIdleTime > 0 )
+            {
+                __asm volatile ( "dsb" ::: "memory" );
+                __asm volatile ( "wfi" );
+                __asm volatile ( "isb" );
+            }
+
+            configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
+
+            /* Re-enable interrupts to allow the interrupt that brought the MCU
+             * out of sleep mode to execute immediately. See comments above
+             * the cpsid instruction above. */
+            __asm volatile ( "cpsie i" ::: "memory" );
+            __asm volatile ( "dsb" );
+            __asm volatile ( "isb" );
+
+            /* Disable interrupts again because the clock is about to be stopped
+             * and interrupts that execute while the clock is stopped will increase
+             * any slippage between the time maintained by the RTOS and calendar
+             * time. */
+            __asm volatile ( "cpsid i" ::: "memory" );
+            __asm volatile ( "dsb" );
+            __asm volatile ( "isb" );
+
+            /* Disable the SysTick clock without reading the
+             * portNVIC_SYSTICK_CTRL_REG register to ensure the
+             * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
+             * the time the SysTick is stopped for is accounted for as best it can
+             * be, but using the tickless mode will inevitably result in some tiny
+             * drift of the time maintained by the kernel with respect to calendar
+             * time*/
+            portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+            /* Determine whether the SysTick has already counted to zero. */
+            if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+            {
+                uint32_t ulCalculatedLoadValue;
+
+                /* The tick interrupt ended the sleep (or is now pending), and
+                 * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG
+                 * with whatever remains of the new tick period. */
+                ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
+
+                /* Don't allow a tiny value, or values that have somehow
+                 * underflowed because the post sleep hook did something
+                 * that took too long or because the SysTick current-value register
+                 * is zero. */
+                if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
+                {
+                    ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
+                }
+
+                portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
+
+                /* As the pending tick will be processed as soon as this
+                 * function exits, the tick value maintained by the tick is stepped
+                 * forward by one less than the time spent waiting. */
+                ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
+            }
+            else
+            {
+                /* Something other than the tick interrupt ended the sleep. */
+
+                /* Use the SysTick current-value register to determine the
+                 * number of SysTick decrements remaining until the expected idle
+                 * time would have ended. */
+                ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+                #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
+                {
+                    /* If the SysTick is not using the core clock, the current-
+                     * value register might still be zero here. In that case, the
+                     * SysTick didn't load from the reload register, and there are
+                     * ulReloadValue decrements remaining in the expected idle
+                     * time, not zero. */
+                    if( ulSysTickDecrementsLeft == 0 )
+                    {
+                        ulSysTickDecrementsLeft = ulReloadValue;
+                    }
+                }
+                #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+                /* Work out how long the sleep lasted rounded to complete tick
+                 * periods (not the ulReload value which accounted for part
+                 * ticks). */
+                ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
+
+                /* How many complete tick periods passed while the processor
+                 * was waiting? */
+                ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
+
+                /* The reload value is set to whatever fraction of a single tick
+                 * period remains. */
+                portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
+            }
+
+            /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
+             * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If
+             * the SysTick is not using the core clock, temporarily configure it to
+             * use the core clock. This configuration forces the SysTick to load
+             * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
+             * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready
+             * to receive the standard value immediately. */
+            portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+            portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+            #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
+            {
+                portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+            }
+            #else
+            {
+                /* The temporary usage of the core clock has served its purpose,
+                 * as described above. Resume usage of the other clock. */
+                portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;
+
+                if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+                {
+                    /* The partial tick period already ended. Be sure the SysTick
+                     * counts it only once. */
+                    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
+                }
+
+                portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+                portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+            }
+            #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+            /* Step the tick to account for any tick periods that elapsed. */
+            vTaskStepTick( ulCompleteTickPeriods );
+
+            /* Exit with interrupts enabled. */
+            __asm volatile ( "cpsie i" ::: "memory" );
+        }
+    }
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Configure the SysTick to generate the tick interrupt at the rate
+ * set by configTICK_RATE_HZ.  Declared weak so the application can override
+ * it.
+ */
+__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Calculate the constants required to configure the tick interrupt. */
+    #if ( configUSE_TICKLESS_IDLE == 1 )
+    {
+        ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
+        xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
+        ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
+    }
+    #endif /* configUSE_TICKLESS_IDLE */
+
+    /* Stop and reset the SysTick. */
+    portNVIC_SYSTICK_CTRL_REG = 0UL;
+    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+    /* Configure SysTick to interrupt at the requested rate. */
+    portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
+    portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+}
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Trap reached if a task function returns.
+ *
+ * A function that implements a task must not exit or attempt to return to
+ * its caller as there is nothing to return to - a task that wants to end
+ * itself should call vTaskDelete( NULL ) instead.  Trip configASSERT() (when
+ * defined) so the error is caught during development, then spin forever with
+ * interrupts masked.
+ */
+static void prvTaskExitError( void )
+{
+    /* Never written, so the loop below can never be proven to terminate. */
+    volatile uint32_t ulBlockForever = 0UL;
+
+    configASSERT( ulCriticalNesting == ~0UL );
+    portDISABLE_INTERRUPTS();
+
+    /* This file calls prvTaskExitError() after the scheduler has been
+     * started to remove a compiler warning about the function being defined
+     * but never called.  Because the loop condition reads a volatile the
+     * compiler must assume the function could return, which suppresses
+     * 'unreachable code' warnings for anything placed after a call to it. */
+    while( ulBlockForever == 0UL )
+    {
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /**
+     * @brief Translate the access permission bits of an MPU RBAR value into
+     * the kernel's tskMPU_*_PERMISSION flags.  Regions that are neither
+     * read-only nor read-write report no permissions.
+     */
+    static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulAccessPermissions;
+
+        switch( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK )
+        {
+            case portMPU_REGION_READ_ONLY:
+                ulAccessPermissions = tskMPU_READ_PERMISSION;
+                break;
+
+            case portMPU_REGION_READ_WRITE:
+                ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+                break;
+
+            default:
+                ulAccessPermissions = 0;
+                break;
+        }
+
+        return ulAccessPermissions;
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /**
+     * @brief Program the fixed MPU regions (privileged flash, unprivileged
+     * flash, system call flash and privileged SRAM) from linker-provided
+     * section symbols, then enable the MemManage fault and the MPU with
+     * privileged background access.
+     */
+    static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
+    {
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_functions_start__;
+            extern uint32_t * __privileged_functions_end__;
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+            extern uint32_t * __unprivileged_flash_start__;
+            extern uint32_t * __unprivileged_flash_end__;
+            extern uint32_t * __privileged_sram_start__;
+            extern uint32_t * __privileged_sram_end__;
+        #else /* if defined( __ARMCC_VERSION ) */
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __privileged_functions_start__[];
+            extern uint32_t __privileged_functions_end__[];
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+            extern uint32_t __unprivileged_flash_start__[];
+            extern uint32_t __unprivileged_flash_end__[];
+            extern uint32_t __privileged_sram_start__[];
+            extern uint32_t __privileged_sram_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+
+        /* The only permitted number of regions are 8 or 16. */
+        configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) );
+
+        /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */
+        configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );
+
+        /* Check that the MPU is present. */
+        if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
+        {
+            /* MAIR0 - Index 0. */
+            portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+            /* MAIR0 - Index 1. */
+            portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+            /* Setup privileged flash as Read Only so that privileged tasks can
+             * read it but not modify. */
+            portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_PRIVILEGED_READ_ONLY );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Setup unprivileged flash as Read Only by both privileged and
+             * unprivileged tasks. All tasks can read it but no-one can modify. */
+            portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_READ_ONLY );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Setup unprivileged syscalls flash as Read Only by both privileged
+             * and unprivileged tasks. All tasks can read it but no-one can modify. */
+            portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_READ_ONLY );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Setup RAM containing kernel data for privileged access only. */
+            portMPU_RNR_REG = portPRIVILEGED_RAM_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
+                               ( portMPU_REGION_EXECUTE_NEVER );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Enable mem fault. */
+            portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT;
+
+            /* Enable MPU with privileged background access i.e. unmapped
+             * regions have privileged access. */
+            portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
+        }
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_FPU == 1 )
+
+    /**
+     * @brief Grant full (privileged and unprivileged) access to the FPU via
+     * CPACR and enable automatic plus lazy stacking of floating point
+     * context via FPCCR.
+     */
+    static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */
+    {
+        #if ( configENABLE_TRUSTZONE == 1 )
+        {
+            /* Enable non-secure access to the FPU. */
+            SecureInit_EnableNSFPUAccess();
+        }
+        #endif /* configENABLE_TRUSTZONE */
+
+        /* CP10 = 11 ==> Full access to FPU i.e. both privileged and
+         * unprivileged code should be able to access FPU. CP11 should be
+         * programmed to the same value as CP10. */
+        *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) |
+                            ( portCPACR_CP11_VALUE << portCPACR_CP11_POS )
+                            );
+
+        /* ASPEN = 1 ==> Hardware should automatically preserve floating point
+         * context on exception entry and restore on exception return.
+         * LSPEN = 1 ==> Enable lazy context save of FP state. */
+        *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK );
+    }
+#endif /* configENABLE_FPU */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Request a context switch by pending the PendSV exception.
+ */
+void vPortYield( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Set a PendSV to request a context switch. */
+    portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+
+    /* Barriers are normally not required but do ensure the code is
+     * completely within the specified behaviour for the architecture. */
+    __asm volatile ( "dsb" ::: "memory" );
+    __asm volatile ( "isb" );
+}
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Enter a critical section, incrementing the nesting count.
+ *
+ * Interrupts are masked before the count is incremented so the update of
+ * ulCriticalNesting itself cannot be interrupted.
+ */
+void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+    portDISABLE_INTERRUPTS();
+    ulCriticalNesting++;
+
+    /* Barriers are normally not required but do ensure the code is
+     * completely within the specified behaviour for the architecture. */
+    __asm volatile ( "dsb" ::: "memory" );
+    __asm volatile ( "isb" );
+}
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Leave one level of critical section.
+ *
+ * Interrupts are re-enabled only when the outermost critical section is
+ * exited.  Asserts if called without a matching vPortEnterCritical().
+ */
+void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+    configASSERT( ulCriticalNesting );
+
+    if( --ulCriticalNesting == 0 )
+    {
+        portENABLE_INTERRUPTS();
+    }
+}
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief SysTick interrupt handler - advances the RTOS tick and pends a
+ * context switch (PendSV) when xTaskIncrementTick() reports one is needed.
+ */
+void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
+{
+    uint32_t ulPreviousMask;
+
+    /* Mask kernel-aware interrupts while the tick state is updated; the
+     * previous mask is restored afterwards. */
+    ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();
+    {
+        /* Increment the RTOS tick. */
+        if( xTaskIncrementTick() != pdFALSE )
+        {
+            /* Pend a context switch. */
+            portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+        }
+    }
+    portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
+}
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief C part of the SVC handler - decodes the SVC number from the
+ * caller's stacked PC and dispatches the requested kernel service
+ * (secure context allocate/free, scheduler start, privilege raise).
+ *
+ * @param pulCallerStackAddress Pointer to the exception stack frame of the
+ * code that raised the SVC.
+ */
+void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
+{
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+    #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+    uint32_t ulPC;
+
+    #if ( configENABLE_TRUSTZONE == 1 )
+        uint32_t ulR0, ulR1;
+        extern TaskHandle_t pxCurrentTCB;
+        #if ( configENABLE_MPU == 1 )
+            uint32_t ulControl, ulIsTaskPrivileged;
+        #endif /* configENABLE_MPU */
+    #endif /* configENABLE_TRUSTZONE */
+    uint8_t ucSVCNumber;
+
+    /* Register are stored on the stack in the following order - R0, R1, R2, R3,
+     * R12, LR, PC, xPSR. */
+    ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
+    /* The SVC number is the immediate byte of the SVC instruction, located
+     * two bytes before the stacked return address. */
+    ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
+
+    switch( ucSVCNumber )
+    {
+        #if ( configENABLE_TRUSTZONE == 1 )
+            case portSVC_ALLOCATE_SECURE_CONTEXT:
+
+                /* R0 contains the stack size passed as parameter to the
+                 * vPortAllocateSecureContext function. */
+                ulR0 = pulCallerStackAddress[ 0 ];
+
+                #if ( configENABLE_MPU == 1 )
+                {
+                    /* Read the CONTROL register value. */
+                    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
+
+                    /* The task that raised the SVC is privileged if Bit[0]
+                     * in the CONTROL register is 0. */
+                    ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 );
+
+                    /* Allocate and load a context for the secure task. */
+                    xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB );
+                }
+                #else /* if ( configENABLE_MPU == 1 ) */
+                {
+                    /* Allocate and load a context for the secure task. */
+                    xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB );
+                }
+                #endif /* configENABLE_MPU */
+
+                configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID );
+                SecureContext_LoadContext( xSecureContext, pxCurrentTCB );
+                break;
+
+            case portSVC_FREE_SECURE_CONTEXT:
+
+                /* R0 contains TCB being freed and R1 contains the secure
+                 * context handle to be freed. */
+                ulR0 = pulCallerStackAddress[ 0 ];
+                ulR1 = pulCallerStackAddress[ 1 ];
+
+                /* Free the secure context. */
+                SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 );
+                break;
+        #endif /* configENABLE_TRUSTZONE */
+
+        case portSVC_START_SCHEDULER:
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                /* De-prioritize the non-secure exceptions so that the
+                 * non-secure pendSV runs at the lowest priority. */
+                SecureInit_DePrioritizeNSExceptions();
+
+                /* Initialize the secure context management system. */
+                SecureContext_Init();
+            }
+            #endif /* configENABLE_TRUSTZONE */
+
+            #if ( configENABLE_FPU == 1 )
+            {
+                /* Setup the Floating Point Unit (FPU). */
+                prvSetupFPU();
+            }
+            #endif /* configENABLE_FPU */
+
+            /* Setup the context of the first task so that the first task starts
+             * executing. */
+            vRestoreContextOfFirstTask();
+            break;
+
+        #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+            case portSVC_RAISE_PRIVILEGE:
+
+                /* Only raise the privilege, if the svc was raised from any of
+                 * the system calls. */
+                if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+                    ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+                {
+                    vRaisePrivilege();
+                }
+                break;
+        #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+        default:
+            /* Incorrect SVC call. */
+            configASSERT( pdFALSE );
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /**
+     * @brief SVC-time entry into a kernel system call (MPU wrappers v2).
+     *
+     * Validates that the SVC came from the system call (MPU_<API>) flash
+     * section, copies the exception stack frame from the task stack to the
+     * task's dedicated system call stack, redirects the return PC to the
+     * system call implementation, plants vRequestSystemCallExit() as the
+     * return address and raises privilege for the duration of the call.
+     *
+     * @param pulTaskStack The task stack (PSP) at the time of the SVC.
+     * @param ulLR The exception return (EXC_RETURN) value.
+     * @param ucSystemCallNumber Index into uxSystemCallImplementations,
+     * range-checked by the assembly SVC handler before this is called.
+     */
+    void vSystemCallEnter( uint32_t * pulTaskStack,
+                           uint32_t ulLR,
+                           uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+    {
+        extern TaskHandle_t pxCurrentTCB;
+        extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+        xMPU_SETTINGS * pxMpuSettings;
+        uint32_t * pulSystemCallStack;
+        uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+        #endif /* #if defined( __ARMCC_VERSION ) */
+
+        ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+        /* Checks:
+         * 1. SVC is raised from the system call section (i.e. application is
+         *    not raising SVC directly).
+         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+         *    it is non-NULL only during the execution of a system call (i.e.
+         *    between system call enter and exit).
+         * 3. System call is not for a kernel API disabled by the configuration
+         *    in FreeRTOSConfig.h.
+         * 4. We do not need to check that ucSystemCallNumber is within range
+         *    because the assembly SVC handler checks that before calling
+         *    this function.
+         */
+        if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+            ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+            ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+        {
+            pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+            {
+                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+                {
+                    /* Extended frame i.e. FPU in use. */
+                    ulStackFrameSize = 26;
+                    __asm volatile
+                    (
+                        " vpush {s0}         \n" /* Trigger lazy stacking. */
+                        " vpop  {s0}         \n" /* Nullify the affect of the above instruction. */
+                        ::: "memory"
+                    );
+                }
+                else
+                {
+                    /* Standard frame i.e. FPU not in use. */
+                    ulStackFrameSize = 8;
+                }
+            }
+            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+            {
+                ulStackFrameSize = 8;
+            }
+            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+            /* Make space on the system call stack for the stack frame. */
+            pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+            /* Copy the stack frame. */
+            for( i = 0; i < ulStackFrameSize; i++ )
+            {
+                pulSystemCallStack[ i ] = pulTaskStack[ i ];
+            }
+
+            /* Store the value of the Link Register before the SVC was raised.
+             * It contains the address of the caller of the System Call entry
+             * point (i.e. the caller of the MPU_<API>). We need to restore it
+             * when we exit from the system call. */
+            pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+            /* Store the value of the PSPLIM register before the SVC was raised.
+             * We need to restore it when we exit from the system call. */
+            __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+            /* Use the pulSystemCallStack in thread mode. */
+            __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+            /* Start executing the system call upon returning from this handler. */
+            pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+            /* Raise a request to exit from the system call upon finishing the
+             * system call. */
+            pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+            /* Remember the location where we should copy the stack frame when we exit from
+             * the system call. */
+            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+            {
+                pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+            }
+            else
+            {
+                pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+            }
+
+            /* We ensure in pxPortInitialiseStack that the system call stack is
+             * double word aligned and therefore, there is no need of padding.
+             * Clear the bit[9] of stacked xPSR. */
+            pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+            /* Raise the privilege for the duration of the system call. */
+            __asm volatile
+            (
+                " mrs r0, control     \n" /* Obtain current control value. */
+                " movs r1, #1         \n" /* r1 = 1. */
+                " bics r0, r1         \n" /* Clear nPRIV bit. */
+                " msr control, r0     \n" /* Write back new control value. */
+                ::: "r0", "r1", "memory"
+            );
+        }
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /**
+     * @brief Raise the SVC that exits a system call.  The address of this
+     * function is planted in the LR slot of the system call stack frame by
+     * vSystemCallEnter(), so it executes when the system call implementation
+     * returns.
+     */
+    void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+    {
+        __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /**
+     * @brief SVC-time exit from a kernel system call (MPU wrappers v2).
+     *
+     * Validates that the exit SVC was raised from privileged code, copies
+     * the exception stack frame back from the system call stack to the task
+     * stack, restores the pre-call LR/PSPLIM and stacked xPSR padding bit,
+     * then drops privilege before returning to thread mode.
+     *
+     * @param pulSystemCallStack The system call stack (PSP) at the time of
+     * the exit SVC.
+     * @param ulLR The exception return (EXC_RETURN) value.
+     */
+    void vSystemCallExit( uint32_t * pulSystemCallStack,
+                          uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+    {
+        extern TaskHandle_t pxCurrentTCB;
+        xMPU_SETTINGS * pxMpuSettings;
+        uint32_t * pulTaskStack;
+        uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_functions_start__;
+            extern uint32_t * __privileged_functions_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __privileged_functions_start__[];
+            extern uint32_t __privileged_functions_end__[];
+        #endif /* #if defined( __ARMCC_VERSION ) */
+
+        ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+        /* Checks:
+         * 1. SVC is raised from the privileged code (i.e. application is not
+         *    raising SVC directly). This SVC is only raised from
+         *    vRequestSystemCallExit which is in the privileged code section.
+         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+         *    this means that we previously entered a system call and the
+         *    application is not attempting to exit without entering a system
+         *    call.
+         */
+        if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+            ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+        {
+            pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+            {
+                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+                {
+                    /* Extended frame i.e. FPU in use. */
+                    ulStackFrameSize = 26;
+                    __asm volatile
+                    (
+                        " vpush {s0}         \n" /* Trigger lazy stacking. */
+                        " vpop  {s0}         \n" /* Nullify the affect of the above instruction. */
+                        ::: "memory"
+                    );
+                }
+                else
+                {
+                    /* Standard frame i.e. FPU not in use. */
+                    ulStackFrameSize = 8;
+                }
+            }
+            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+            {
+                ulStackFrameSize = 8;
+            }
+            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+            /* Make space on the task stack for the stack frame. */
+            pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+            /* Copy the stack frame. */
+            for( i = 0; i < ulStackFrameSize; i++ )
+            {
+                pulTaskStack[ i ] = pulSystemCallStack[ i ];
+            }
+
+            /* Use the pulTaskStack in thread mode. */
+            __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+            /* Return to the caller of the System Call entry point (i.e. the
+             * caller of the MPU_<API>). */
+            pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+            /* Ensure that LR has a valid value.*/
+            pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+            /* Restore the PSPLIM register to what it was at the time of
+             * system call entry. */
+            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+            /* If the hardware used padding to force the stack pointer
+             * to be double word aligned, set the stacked xPSR bit[9],
+             * otherwise clear it. */
+            if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+            }
+            else
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+            }
+
+            /* This is not NULL only for the duration of the system call. */
+            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+            /* Drop the privilege before returning to the thread mode. */
+            __asm volatile
+            (
+                " mrs r0, control     \n" /* Obtain current control value. */
+                " movs r1, #1         \n" /* r1 = 1. */
+                " orrs r0, r1         \n" /* Set nPRIV bit. */
+                " msr control, r0     \n" /* Write back new control value. */
+                ::: "r0", "r1", "memory"
+            );
+        }
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Return pdTRUE when the calling task was created as a privileged task
+     * (portTASK_IS_PRIVILEGED_FLAG set in its MPU settings), pdFALSE
+     * otherwise.  Passing NULL to xTaskGetMPUSettings() queries the
+     * currently running task. */
+    BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xTaskIsPrivileged = pdFALSE;
+        const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+        if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+        {
+            xTaskIsPrivileged = pdTRUE;
+        }
+
+        return xTaskIsPrivileged;
+    }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Seed the initial register context for a new task.  In the MPU port the
+     * complete context (r0-r12, LR, PC, xPSR, PSP, PSPLIM, CONTROL,
+     * EXC_RETURN and, when TrustZone is enabled, xSecureContext) is stored
+     * in the task's xMPU_SETTINGS::ulContext array rather than on the task
+     * stack.  Returns the address of the first unused ulContext entry. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters,
+                                         BaseType_t xRunPrivileged,
+                                         xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulIndex = 0;
+
+        /* Software-saved (callee-saved) registers first, preloaded with
+         * recognisable values to aid debugging. */
+        xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+        ulIndex++;
+
+        /* Hardware-saved frame: r0 carries the task parameter, PC is the
+         * task entry point, LR catches an illegal task return. */
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+        ulIndex++;
+
+        #if ( configENABLE_TRUSTZONE == 1 )
+        {
+            xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+            ulIndex++;
+        }
+        #endif /* configENABLE_TRUSTZONE */
+        /* PSP is set 8 words below the top of stack to account for the
+         * hardware-saved frame (r0-r3, r12, LR, PC, xPSR). */
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+        ulIndex++;
+        /* CONTROL selects the initial privilege level of the task. */
+        if( xRunPrivileged == pdTRUE )
+        {
+            xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        else
+        {
+            xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+        ulIndex++;
+
+        #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+        {
+            /* Ensure that the system call stack is double word aligned. */
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+                                                                                    ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            /* Round the stack limit up so it also falls on an aligned
+             * boundary inside the buffer. */
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+                                                                                            ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+                                                                                          ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            /* This is not NULL only for the duration of a system call. */
+            xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+        }
+        #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+        return &( xMPUSettings->ulContext[ ulIndex ] );
+    }
+
+#else /* configENABLE_MPU */
+
+    /* Build, on the task's own stack, the frame that the context switch code
+     * expects to find for a task that has already been running, and return
+     * the resulting top of stack. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Simulate the stack frame as it would be created by a context switch
+         * interrupt. */
+        #if ( portPRELOAD_REGISTERS == 0 )
+        {
+            pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
+            *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+            pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
+            *pxTopOfStack = portINITIAL_EXC_RETURN;
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #else /* portPRELOAD_REGISTERS */
+        {
+            /* Same layout as above, but every general purpose register is
+             * preloaded with a recognisable value to aid debugging. */
+            pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
+            pxTopOfStack--;
+            *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #endif /* portPRELOAD_REGISTERS */
+
+        return pxTopOfStack;
+    }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Configure the interrupt controller (and, when enabled, the MPU and the
+ * tick timer), then start the first task.  Does not return on success. */
+BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+    {
+        volatile uint32_t ulOriginalPriority;
+        volatile uint32_t ulImplementedPrioBits = 0;
+        volatile uint8_t ucMaxPriorityValue;
+
+        /* Determine the maximum priority from which ISR safe FreeRTOS API
+         * functions can be called. ISR safe functions are those that end in
+         * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+         * ensure interrupt entry is as fast and simple as possible.
+         *
+         * Save the interrupt priority value that is about to be clobbered. */
+        ulOriginalPriority = portNVIC_SHPR2_REG;
+
+        /* Determine the number of priority bits available. First write to all
+         * possible bits. */
+        portNVIC_SHPR2_REG = 0xFF000000;
+
+        /* Read the value back to see how many bits stuck. */
+        ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+        /* Use the same mask on the maximum system call priority. */
+        ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+        /* Check that the maximum system call priority is nonzero after
+         * accounting for the number of priority bits supported by the
+         * hardware. A priority of 0 is invalid because setting the BASEPRI
+         * register to 0 unmasks all interrupts, and interrupts with priority 0
+         * cannot be masked using BASEPRI.
+         * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+        configASSERT( ucMaxSysCallPriority );
+
+        /* Check that the bits not implemented in hardware are zero in
+         * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+        configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+        /* Calculate the maximum acceptable priority group value for the number
+         * of bits read back. */
+
+        /* Count the implemented (stuck) priority bits from the top of the
+         * byte downwards. */
+        while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+        {
+            ulImplementedPrioBits++;
+            ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+        }
+
+        if( ulImplementedPrioBits == 8 )
+        {
+            /* When the hardware implements 8 priority bits, there is no way for
+             * the software to configure PRIGROUP to not have sub-priorities. As
+             * a result, the least significant bit is always used for sub-priority
+             * and there are 128 preemption priorities and 2 sub-priorities.
+             *
+             * This may cause some confusion in some cases - for example, if
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+             * priority interrupts will be masked in Critical Sections as those
+             * are at the same preemption priority. This may appear confusing as
+             * 4 is higher (numerically lower) priority than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+             * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+             * to 4, this confusion does not happen and the behaviour remains the same.
+             *
+             * The following assert ensures that the sub-priority bit in the
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+             * confusion. */
+            configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+            ulMaxPRIGROUPValue = 0;
+        }
+        else
+        {
+            ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+        }
+
+        /* Shift the priority group value back to its position within the AIRCR
+         * register. */
+        ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+        ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+        /* Restore the clobbered interrupt priority register to its original
+         * value. */
+        portNVIC_SHPR2_REG = ulOriginalPriority;
+    }
+    #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+    /* Make PendSV, CallSV and SysTick the same priority as the kernel. */
+    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
+    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
+
+    #if ( configENABLE_MPU == 1 )
+    {
+        /* Setup the Memory Protection Unit (MPU). */
+        prvSetupMPU();
+    }
+    #endif /* configENABLE_MPU */
+
+    /* Start the timer that generates the tick ISR. Interrupts are disabled
+     * here already. */
+    vPortSetupTimerInterrupt();
+
+    /* Initialize the critical nesting count ready for the first task. */
+    ulCriticalNesting = 0;
+
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+    {
+        /* From this point on, ACL checks use the running task's permissions
+         * instead of granting access unconditionally. */
+        xSchedulerRunning = pdTRUE;
+    }
+    #endif
+
+    /* Start the first task. */
+    vStartFirstTask();
+
+    /* Should never get here as the tasks will now be executing. Call the task
+     * exit error function to prevent compiler warnings about a static function
+     * not being called in the case that the application writer overrides this
+     * functionality by defining configTASK_RETURN_ADDRESS. Call
+     * vTaskSwitchContext() so link time optimization does not remove the
+     * symbol. */
+    vTaskSwitchContext();
+    prvTaskExitError();
+
+    /* Should not get here. */
+    return 0;
+}
+/*-----------------------------------------------------------*/
+
+/* This port cannot stop the scheduler once started, so deliberately fail an
+ * assertion if the application ever calls this. */
+void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Not implemented in ports where there is nothing to return to.
+     * Artificially force an assert. */
+    configASSERT( ulCriticalNesting == 1000UL );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+    /* Translate the generic xRegions definitions supplied by the application
+     * into ARMv8-M RBAR/RLAR register values stored in xMPUSettings and,
+     * when ulStackDepth > 0, create an MPU region covering the task stack. */
+    void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
+                                    const struct xMEMORY_REGION * const xRegions,
+                                    StackType_t * pxBottomOfStack,
+                                    uint32_t ulStackDepth )
+    {
+        uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber;
+        int32_t lIndex = 0;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_sram_start__;
+            extern uint32_t * __privileged_sram_end__;
+        #else
+            /* Declaration when these variables are exported from linker scripts. */
+            extern uint32_t __privileged_sram_start__[];
+            extern uint32_t __privileged_sram_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+
+        /* Setup MAIR0: attribute index 0 is normal memory, attribute index 1
+         * is device memory. */
+        xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+        xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+        /* This function is called automatically when the task is created - in
+         * which case the stack region parameters will be valid. At all other
+         * times the stack parameters will not be valid and it is assumed that
+         * the stack region has already been configured. */
+        if( ulStackDepth > 0 )
+        {
+            ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+            ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1;
+
+            /* If the stack is within the privileged SRAM, do not protect it
+             * using a separate MPU region. This is needed because privileged
+             * SRAM is already protected using an MPU region and ARMv8-M does
+             * not allow overlapping MPU regions. */
+            if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) &&
+                ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) )
+            {
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0;
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0;
+            }
+            else
+            {
+                /* Define the region that allows access to the stack. */
+                ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) |
+                                                             ( portMPU_REGION_NON_SHAREABLE ) |
+                                                             ( portMPU_REGION_READ_WRITE ) |
+                                                             ( portMPU_REGION_EXECUTE_NEVER );
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) |
+                                                             ( portMPU_RLAR_ATTR_INDEX0 ) |
+                                                             ( portMPU_RLAR_REGION_ENABLE );
+            }
+        }
+
+        /* User supplied configurable regions.  Region 0 is reserved for the
+         * stack, so configurable regions start at index 1. */
+        for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ )
+        {
+            /* If xRegions is NULL i.e. the task has not specified any MPU
+             * region, the else part ensures that all the configurable MPU
+             * regions are invalidated. */
+            if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) )
+            {
+                /* Translate the generic region definition contained in xRegions
+                 * into the ARMv8 specific MPU settings that are then stored in
+                 * xMPUSettings. */
+                ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                /* Start address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) |
+                                                                          ( portMPU_REGION_NON_SHAREABLE );
+
+                /* RO/RW. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY );
+                }
+                else
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE );
+                }
+
+                /* XN. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER );
+                }
+
+                /* End Address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) |
+                                                                          ( portMPU_RLAR_REGION_ENABLE );
+
+                /* Normal memory/ Device memory. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
+                {
+                    /* Attr1 in MAIR0 is configured as device memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1;
+                }
+                else
+                {
+                    /* Attr0 in MAIR0 is configured as normal memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0;
+                }
+            }
+            else
+            {
+                /* Invalidate the region. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL;
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL;
+            }
+
+            lIndex++;
+        }
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+    /* Return pdTRUE if the calling task may access the buffer
+     * [pvBuffer, pvBuffer + ulBufferLength) with the requested permissions.
+     * Privileged tasks are always granted access; unprivileged tasks are
+     * granted access only when the whole buffer lies inside one of the
+     * task's enabled MPU regions with sufficient permissions. */
+    BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+                                                uint32_t ulBufferLength,
+                                                uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+    {
+        uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+        BaseType_t xAccessGranted = pdFALSE;
+        const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+        if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+        {
+            xAccessGranted = pdTRUE;
+        }
+        else
+        {
+            /* Reject buffers whose end address would wrap around the 32-bit
+             * address space. */
+            if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+            {
+                ulBufferStartAddress = ( uint32_t ) pvBuffer;
+                ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+                for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+                {
+                    /* Is the MPU region enabled? */
+                    if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+                    {
+                        /* Both the first and the last byte of the buffer must
+                         * fall inside the same region, and the region must
+                         * permit the requested access. */
+                        if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+                            portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+                            portIS_AUTHORIZED( ulAccessRequested,
+                                               prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+                        {
+                            xAccessGranted = pdTRUE;
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+
+        return xAccessGranted;
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Return pdTRUE when executing in Handler mode (i.e. inside an interrupt or
+ * exception handler), pdFALSE when executing in Thread mode. */
+BaseType_t xPortIsInsideInterrupt( void )
+{
+    uint32_t ulCurrentInterrupt;
+    BaseType_t xReturn;
+
+    /* Obtain the number of the currently executing interrupt. Interrupt Program
+     * Status Register (IPSR) holds the exception number of the currently-executing
+     * exception or zero for Thread mode.*/
+    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+    if( ulCurrentInterrupt == 0 )
+    {
+        xReturn = pdFALSE;
+    }
+    else
+    {
+        xReturn = pdTRUE;
+    }
+
+    return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+    /* Called from the "FromISR" API functions when asserts are enabled:
+     * verifies that the currently executing interrupt's priority permits
+     * calling ISR safe FreeRTOS API functions, and that priority grouping
+     * does not allocate bits to sub-priority. */
+    void vPortValidateInterruptPriority( void )
+    {
+        uint32_t ulCurrentInterrupt;
+        uint8_t ucCurrentPriority;
+
+        /* Obtain the number of the currently executing interrupt. */
+        __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+        /* Is the interrupt number a user defined interrupt? */
+        if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+        {
+            /* Look up the interrupt's priority. */
+            ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+            /* The following assertion will fail if a service routine (ISR) for
+             * an interrupt that has been assigned a priority above
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+             * function. ISR safe FreeRTOS API functions must *only* be called
+             * from interrupts that have been assigned a priority at or below
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Numerically low interrupt priority numbers represent logically high
+             * interrupt priorities, therefore the priority of the interrupt must
+             * be set to a value equal to or numerically *higher* than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Interrupts that use the FreeRTOS API must not be left at their
+             * default priority of zero as that is the highest possible priority,
+             * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+             * and therefore also guaranteed to be invalid.
+             *
+             * FreeRTOS maintains separate thread and ISR API functions to ensure
+             * interrupt entry is as fast and simple as possible.
+             *
+             * The following links provide detailed information:
+             * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+             * https://www.FreeRTOS.org/FAQHelp.html */
+            configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+        }
+
+        /* Priority grouping: The interrupt controller (NVIC) allows the bits
+         * that define each interrupt's priority to be split between bits that
+         * define the interrupt's pre-emption priority bits and bits that define
+         * the interrupt's sub-priority. For simplicity all bits must be defined
+         * to be pre-emption priority bits. The following assertion will fail if
+         * this is not the case (if some bits represent a sub-priority).
+         *
+         * If the application only uses CMSIS libraries for interrupt
+         * configuration then the correct setting can be achieved on all Cortex-M
+         * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+         * scheduler. Note however that some vendor specific peripheral libraries
+         * assume a non-zero priority group setting, in which cases using a value
+         * of zero will result in unpredictable behaviour. */
+        configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+    }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    /* Set the bit corresponding to the given kernel object in the task's
+     * access control list, granting the task access to that object. */
+    void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                         int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+        xMPU_SETTINGS * xTaskMpuSettings;
+
+        /* Locate the ACL word and bit position for this kernel object. */
+        ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+        ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+        xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+        xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    /* Clear the bit corresponding to the given kernel object in the task's
+     * access control list, revoking the task's access to that object. */
+    void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                          int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+        xMPU_SETTINGS * xTaskMpuSettings;
+
+        /* Locate the ACL word and bit position for this kernel object. */
+        ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+        ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+        xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+        xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+        /* Return pdTRUE when the calling task may use the given kernel
+         * object: access is granted before the scheduler starts, to
+         * privileged tasks, and to tasks whose ACL bit for the object is
+         * set. */
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+            BaseType_t xAccessGranted = pdFALSE;
+            const xMPU_SETTINGS * xTaskMpuSettings;
+
+            if( xSchedulerRunning == pdFALSE )
+            {
+                /* Grant access to all the kernel objects before the scheduler
+                 * is started. It is necessary because there is no task running
+                 * yet and therefore, we cannot use the permissions of any
+                 * task. */
+                xAccessGranted = pdTRUE;
+            }
+            else
+            {
+                xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+                /* Locate the ACL word and bit position for this object. */
+                ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+                ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+                if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+                {
+                    xAccessGranted = pdTRUE;
+                }
+                else
+                {
+                    if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+                    {
+                        xAccessGranted = pdTRUE;
+                    }
+                }
+            }
+
+            return xAccessGranted;
+        }
+
+    #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+        /* ACL feature disabled: every task may use every kernel object. */
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            ( void ) lInternalIndexOfKernelObject;
+
+            /* If Access Control List feature is not used, all the tasks have
+             * access to all the kernel objects. */
+            return pdTRUE;
+        }
+
+    #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM85/non_secure/portasm.h b/Source/portable/IAR/ARM_CM85/non_secure/portasm.h
new file mode 100644
index 0000000..f64ceb5
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/non_secure/portasm.h
@@ -0,0 +1,114 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __PORT_ASM_H__
+#define __PORT_ASM_H__
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+
+/* MPU wrappers includes. */
+#include "mpu_wrappers.h"
+
+/**
+ * @brief Restore the context of the first task so that the first task starts
+ * executing.
+ */
+void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Raises the privilege level by clearing the bit 0 of the CONTROL
+ * register.
+ *
+ * @note This is a privileged function and should only be called from the kernel
+ * code.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vResetPrivilege( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Starts the first task.
+ */
+void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Disables interrupts.
+ */
+uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enables interrupts.
+ */
+void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief PendSV Exception handler.
+ */
+void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SVC Handler.
+ */
+void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Allocate a Secure context for the calling task.
+ *
+ * @param[in] ulSecureStackSize The size of the stack to be allocated on the
+ * secure side for the calling task.
+ */
+void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) );
+
+/**
+ * @brief Free the task's secure context.
+ *
+ * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task.
+ */
+void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* __PORT_ASM_H__ */
diff --git a/Source/portable/IAR/ARM_CM85/non_secure/portasm.s b/Source/portable/IAR/ARM_CM85/non_secure/portasm.s
new file mode 100644
index 0000000..5309103
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/non_secure/portasm.s
@@ -0,0 +1,496 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+/* Including FreeRTOSConfig.h here will cause build errors if the header file
+contains code not understood by the assembler - for example the 'extern' keyword.
+To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so
+the code is included in C files but excluded by the preprocessor in assembly
+files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
+#include "FreeRTOSConfig.h"
+
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+ EXTERN pxCurrentTCB
+ EXTERN xSecureContext
+ EXTERN vTaskSwitchContext
+ EXTERN vPortSVCHandler_C
+ EXTERN SecureContext_SaveContext
+ EXTERN SecureContext_LoadContext
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallExit
+#endif
+
+ PUBLIC xIsPrivileged
+ PUBLIC vResetPrivilege
+ PUBLIC vPortAllocateSecureContext
+ PUBLIC vRestoreContextOfFirstTask
+ PUBLIC vRaisePrivilege
+ PUBLIC vStartFirstTask
+ PUBLIC ulSetInterruptMask
+ PUBLIC vClearInterruptMask
+ PUBLIC PendSV_Handler
+ PUBLIC SVC_Handler
+ PUBLIC vPortFreeSecureContext
+/*-----------------------------------------------------------*/
+
+/*---------------- Unprivileged Functions -------------------*/
+
+/*-----------------------------------------------------------*/
+
+ SECTION .text:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+xIsPrivileged:
+ mrs r0, control /* r0 = CONTROL. */
+ tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+ ite ne
+ movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+ moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+ bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+vResetPrivilege:
+ mrs r0, control /* r0 = CONTROL. */
+ orr r0, r0, #1 /* r0 = r0 | 1. */
+ msr control, r0 /* CONTROL = r0. */
+ bx lr /* Return to the caller. */
+/*-----------------------------------------------------------*/
+
+vPortAllocateSecureContext:
+ svc 100 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 100. */
+ bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+/*----------------- Privileged Functions --------------------*/
+
+/*-----------------------------------------------------------*/
+
+ SECTION privileged_functions:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+
+ restore_general_regs_first_task:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
+vRestoreContextOfFirstTask:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r3, [r2] /* Read pxCurrentTCB. */
+ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+
+ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ ldr r4, =xSecureContext
+ str r1, [r4] /* Set xSecureContext to this task's value for the same. */
+ msr psplim, r2 /* Set this task's PSPLIM value. */
+ movs r1, #2 /* r1 = 2. */
+ msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
+ adds r0, #32 /* Discard everything up to r0. */
+ msr psp, r0 /* This is now the new top of stack to use in the task. */
+ isb
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx r3 /* Finally, branch to EXC_RETURN. */
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+vRaisePrivilege:
+ mrs r0, control /* Read the CONTROL register. */
+ bic r0, r0, #1 /* Clear the bit 0. */
+ msr control, r0 /* Write back the new CONTROL value. */
+ bx lr /* Return to the caller. */
+/*-----------------------------------------------------------*/
+
+vStartFirstTask:
+ ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
+ ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
+ ldr r0, [r0] /* The first entry in vector table is stack pointer. */
+ msr msp, r0 /* Set the MSP back to the start of the stack. */
+ cpsie i /* Globally enable interrupts. */
+ cpsie f
+ dsb
+ isb
+ svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
+/*-----------------------------------------------------------*/
+
+ulSetInterruptMask:
+ mrs r0, basepri /* r0 = basepri. Return original basepri value. */
+ mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+vClearInterruptMask:
+ msr basepri, r0 /* basepri = ulMask. */
+ dsb
+ isb
+ bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */
+
+ cbz r0, save_ns_context /* No secure context to save. */
+ save_s_context:
+ push {r0-r2, lr}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r2, lr}
+
+ save_ns_context:
+ mov r3, lr /* r3 = LR (EXC_RETURN). */
+ lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ save_general_regs:
+ mrs r3, psp
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r2!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */
+ sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r2!, {r4-r11} /* Store r4-r11. */
+ ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r2!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psp /* r3 = PSP. */
+ mrs r4, psplim /* r4 = PSPLIM. */
+ mrs r5, control /* r5 = CONTROL. */
+ stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+ cbz r0, restore_ns_context /* No secure context to restore. */
+
+ restore_s_context:
+ push {r1-r3, lr}
+ bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r1-r3, lr}
+
+ restore_ns_context:
+ mov r0, lr /* r0 = LR (EXC_RETURN). */
+ lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ restore_general_regs:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
+PendSV_Handler:
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ mrs r2, psp /* Read PSP in r2. */
+
+ cbz r0, save_ns_context /* No secure context to save. */
+ push {r0-r2, r14}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r3} /* LR is now in r3. */
+ mov lr, r3 /* LR = r3. */
+ lsls r1, r3, #25 /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ str r2, [r1] /* Save the new top of stack in TCB. */
+ mrs r1, psplim /* r1 = PSPLIM. */
+ mov r3, lr /* r3 = LR/EXC_RETURN. */
+ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
+ b select_next_task
+
+ save_ns_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ it eq
+ vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ str r2, [r1] /* Save the new top of stack in TCB. */
+ adds r2, r2, #12 /* r2 = r2 + 12. */
+ stm r2, {r4-r11} /* Store the registers that are not saved automatically. */
+ mrs r1, psplim /* r1 = PSPLIM. */
+ mov r3, lr /* r3 = LR/EXC_RETURN. */
+ subs r2, r2, #12 /* r2 = r2 - 12. */
+ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
+
+ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ msr psplim, r1 /* Restore the PSPLIM register value for the task. */
+ mov lr, r4 /* LR = r4. */
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r3] /* Restore the task's xSecureContext. */
+ cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB. */
+ push {r2, r4}
+ bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r2, r4}
+ mov lr, r4 /* LR = r4. */
+ lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ msr psp, r2 /* Remember the new top of stack for the task. */
+ bx lr
+
+ restore_ns_context:
+ ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ it eq
+ vldmiaeq r2!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ msr psp, r2 /* Remember the new top of stack for the task. */
+ bx lr
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #NUM_SYSTEM_CALLS
+ blt syscall_enter
+ cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+ b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+vPortFreeSecureContext:
+ /* r0 = uint32_t *pulTCB. */
+ ldr r2, [r0] /* The first item in the TCB is the top of the stack. */
+ ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */
+ cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */
+ it ne
+ svcne 101 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 101. */
+ bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+ END
diff --git a/Source/portable/IAR/ARM_CM85/non_secure/portmacro.h b/Source/portable/IAR/ARM_CM85/non_secure/portmacro.h
new file mode 100644
index 0000000..ee5baf1
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/non_secure/portmacro.h
@@ -0,0 +1,85 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_MVE
+ #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE.
+#endif /* configENABLE_MVE */
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portARCH_NAME "Cortex-M85"
+#define portHAS_BASEPRI 1
+#define portDONT_DISCARD __root
+/*-----------------------------------------------------------*/
+
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portDISABLE_INTERRUPTS() ulSetInterruptMask()
+#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 )
+/*-----------------------------------------------------------*/
+
+/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in
+ * the source code because to do so would cause other compilers to generate
+ * warnings. */
+#pragma diag_suppress=Be006
+#pragma diag_suppress=Pa082
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h b/Source/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h
new file mode 100644
index 0000000..6f666da
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h
@@ -0,0 +1,449 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
+
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
+
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
+
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Type definitions.
+ */
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
+
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
+
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+
+/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
+ * not need to be guarded with a critical section. */
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Extern declarations.
+ */
+extern BaseType_t xPortIsInsideInterrupt( void );
+
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
+
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief MPU specific constants.
+ */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
+
+/* MPU settings that can be overriden in FreeRTOSConfig.h. */
+#ifndef configTOTAL_MPU_REGIONS
+ /* Define to 8 for backward compatibility. */
+ #define configTOTAL_MPU_REGIONS ( 8UL )
+#endif
+
+/* MPU regions. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+
+/* Device memory attributes used in MPU_MAIR registers.
+ *
+ * 8-bit values encoded as follows:
+ * Bit[7:4] - 0000 - Device Memory
+ * Bit[3:2] - 00 --> Device-nGnRnE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
+ * Bit[1:0] - 00, Reserved.
+ */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+
+/* Normal memory attributes used in MPU_MAIR registers. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+
+/* Attributes used in MPU_RBAR registers. */
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ *
+ * Per-task stack used while a system call executes.
+ * NOTE(review): member semantics are inferred from their names; the
+ * initialization lives in port.c, outside this view - confirm there.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; /**< Dedicated buffer used as the stack during a system call. */
+ uint32_t * pulSystemCallStack; /**< Stack pointer value to use while the system call executes. */
+ uint32_t * pulSystemCallStackLimit; /**< Stack limit (PSPLIM) value to use while the system call executes. */
+ uint32_t * pulTaskStack; /**< Task stack pointer remembered across the system call. */
+ uint32_t ulLinkRegisterAtSystemCallEntry; /**< LR value captured on system call entry. */
+ uint32_t ulStackLimitRegisterAtSystemCallEntry; /**< PSPLIM value captured on system call entry. */
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ /* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the per-task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for the portTOTAL_NUM_REGIONS per-task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ]; /**< Saved task context; layout is described by the diagrams above MAX_CONTEXT_SIZE. */
+ uint32_t ulTaskFlags; /**< Bitwise OR of the port*_FLAG values defined above. */
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ]; /**< ACL bitmap (inferred from the sizing); the + 1 rounds the bit count up to whole 32-bit words. */
+ #endif
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
+ * @brief SVC numbers.
+ *
+ * NOTE(review): numbering starts at 100, presumably to stay clear of the
+ * system call numbers used by the MPU wrappers - confirm against the
+ * kernel's system call number definitions.
+ */
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Scheduler utilities.
+ */
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) /* Interrupt Control and State Register (ICSR). */
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) /* ICSR.PENDSVSET - writing 1 pends the PendSV exception used for context switching. */
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+
+/**
+ * @brief Critical section management.
+ */
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Tickless idle/low power functionality.
+ */
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Task function macros as described on the FreeRTOS.org WEB site.
+ */
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Allocate a secure context for the task.
+ *
+ * Tasks are not created with a secure context. Any task that is going to call
+ * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
+ * secure context before it calls any secure function.
+ *
+ * @param[in] ulSecureStackSize The size of the secure stack to be allocated.
+ */
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+
+/**
+ * @brief Called when a task is deleted to delete the task's secure context,
+ * if it has one.
+ *
+ * @param[in] pxTCB The TCB of the task being deleted.
+ */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+ #define portIS_PRIVILEGED() xIsPrivileged()
+
+/**
+ * @brief Raise an SVC request to raise privilege.
+ *
+ * The SVC handler checks that the SVC was raised from a system call and only
+ * then it raises the privilege. If this is called from any other place,
+ * the privilege is not raised.
+ *
+ * NOTE(review): the expansion below ends in a ';' - callers must not add
+ * another semicolon after the macro invocation.
+ */
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ /* Privilege levels do not apply when the MPU is not in use - these
+ * expand to nothing. */
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Barriers.
+ */
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/IAR/ARM_CM85/secure/secure_context.c b/Source/portable/IAR/ARM_CM85/secure/secure_context.c
new file mode 100644
index 0000000..e37dd96
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/secure/secure_context.c
@@ -0,0 +1,351 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Secure context includes. */
+#include "secure_context.h"
+
+/* Secure heap includes. */
+#include "secure_heap.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief CONTROL value for privileged tasks.
+ *
+ * Bit[0] - 0 --> Thread mode is privileged.
+ * Bit[1] - 1 --> Thread mode uses PSP.
+ */
+#define securecontextCONTROL_VALUE_PRIVILEGED 0x02
+
+/**
+ * @brief CONTROL value for un-privileged tasks.
+ *
+ * Bit[0] - 1 --> Thread mode is un-privileged.
+ * Bit[1] - 1 --> Thread mode uses PSP.
+ */
+#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03
+
+/**
+ * @brief Size of stack seal values in bytes.
+ *
+ * Two 32-bit seal words are written just above each secure stack's start.
+ */
+#define securecontextSTACK_SEAL_SIZE 8
+
+/**
+ * @brief Stack seal value as recommended by ARM.
+ */
+#define securecontextSTACK_SEAL_VALUE 0xFEF5EDA5
+
+/**
+ * @brief Maximum number of secure contexts.
+ */
+#ifndef secureconfigMAX_SECURE_CONTEXTS
+ #define secureconfigMAX_SECURE_CONTEXTS 8UL
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Pre-allocated array of secure contexts.
+ *
+ * A context slot is free when all four of its members are NULL (see
+ * ulGetSecureContext and vReturnSecureContext).
+ */
+SecureContext_t xSecureContexts[ secureconfigMAX_SECURE_CONTEXTS ];
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Get a free secure context for a task from the secure context pool (xSecureContexts).
+ *
+ * This function ensures that only one secure context is allocated for a task.
+ *
+ * @param[in] pvTaskHandle The task handle for which the secure context is allocated.
+ *
+ * @return Index of a free secure context in the xSecureContexts array, or
+ * secureconfigMAX_SECURE_CONTEXTS if no free context is available or
+ * the task already owns a context.
+ */
+static uint32_t ulGetSecureContext( void * pvTaskHandle );
+
+/**
+ * @brief Return the secure context to the secure context pool (xSecureContexts).
+ *
+ * @param[in] ulSecureContextIndex Index of the context in the xSecureContexts array.
+ */
+static void vReturnSecureContext( uint32_t ulSecureContextIndex );
+
+/* These are implemented in assembly (secure_context_port_asm.s). */
+extern void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext );
+extern void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext );
+/*-----------------------------------------------------------*/
+
+static uint32_t ulGetSecureContext( void * pvTaskHandle )
+{
+ /* Start with invalid index. */
+ uint32_t i, ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS;
+
+ /* Scan the whole pool: remember the first free slot (all members NULL)
+ * but keep scanning to make sure this task does not already own a
+ * context. */
+ for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ )
+ {
+ if( ( xSecureContexts[ i ].pucCurrentStackPointer == NULL ) &&
+ ( xSecureContexts[ i ].pucStackLimit == NULL ) &&
+ ( xSecureContexts[ i ].pucStackStart == NULL ) &&
+ ( xSecureContexts[ i ].pvTaskHandle == NULL ) &&
+ ( ulSecureContextIndex == secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = i;
+ }
+ else if( xSecureContexts[ i ].pvTaskHandle == pvTaskHandle )
+ {
+ /* A task can only have one secure context. Do not allocate a second
+ * context for the same task. */
+ ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS;
+ break;
+ }
+ }
+
+ /* secureconfigMAX_SECURE_CONTEXTS signals "no context available". */
+ return ulSecureContextIndex;
+}
+/*-----------------------------------------------------------*/
+
+static void vReturnSecureContext( uint32_t ulSecureContextIndex )
+{
+ /* Setting every member to NULL marks the slot as free - this is exactly
+ * the "free" condition tested by ulGetSecureContext(). */
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = NULL;
+ xSecureContexts[ ulSecureContextIndex ].pucStackLimit = NULL;
+ xSecureContexts[ ulSecureContextIndex ].pucStackStart = NULL;
+ xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = NULL;
+}
+/*-----------------------------------------------------------*/
+
+/* One-time initialization of the secure context pool. No-op when called from
+ * the thread mode (IPSR == 0) and on second and subsequent calls. */
+secureportNON_SECURE_CALLABLE void SecureContext_Init( void )
+{
+ uint32_t ulIPSR, i;
+ static uint32_t ulSecureContextsInitialized = 0;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ( ulIPSR != 0 ) && ( ulSecureContextsInitialized == 0 ) )
+ {
+ /* Ensure to initialize secure contexts only once. */
+ ulSecureContextsInitialized = 1;
+
+ /* No stack for thread mode until a task's context is loaded. */
+ secureportSET_PSPLIM( securecontextNO_STACK );
+ secureportSET_PSP( securecontextNO_STACK );
+
+ /* Initialize all secure contexts. */
+ for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ )
+ {
+ xSecureContexts[ i ].pucCurrentStackPointer = NULL;
+ xSecureContexts[ i ].pucStackLimit = NULL;
+ xSecureContexts[ i ].pucStackStart = NULL;
+ xSecureContexts[ i ].pvTaskHandle = NULL;
+ }
+
+ #if ( configENABLE_MPU == 1 )
+ {
+ /* Configure thread mode to use PSP and to be unprivileged. */
+ secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED );
+ }
+ #else /* configENABLE_MPU */
+ {
+ /* Configure thread mode to use PSP and to be privileged. */
+ secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED );
+ }
+ #endif /* configENABLE_MPU */
+ }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+ secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ uint32_t ulIsTaskPrivileged,
+ void * pvTaskHandle )
+#else /* configENABLE_MPU */
+ secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ void * pvTaskHandle )
+#endif /* configENABLE_MPU */
+{
+ uint8_t * pucStackMemory = NULL;
+ uint8_t * pucStackLimit;
+ uint32_t ulIPSR, ulSecureContextIndex;
+ SecureContextHandle_t xSecureContextHandle = securecontextINVALID_CONTEXT_ID;
+
+ #if ( configENABLE_MPU == 1 )
+ uint32_t * pulCurrentStackPointer = NULL;
+ #endif /* configENABLE_MPU */
+
+ /* Read the Interrupt Program Status Register (IPSR) and Process Stack Limit
+ * Register (PSPLIM) value. */
+ secureportREAD_IPSR( ulIPSR );
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode.
+ * Also do nothing, if a secure context is already loaded. PSPLIM is set to
+ * securecontextNO_STACK when no secure context is loaded. */
+ if( ( ulIPSR != 0 ) && ( pucStackLimit == securecontextNO_STACK ) )
+ {
+ /* Obtain a free secure context. */
+ ulSecureContextIndex = ulGetSecureContext( pvTaskHandle );
+
+ /* Were we able to get a free context? */
+ if( ulSecureContextIndex < secureconfigMAX_SECURE_CONTEXTS )
+ {
+ /* Allocate the stack space. Extra securecontextSTACK_SEAL_SIZE
+ * bytes hold the two seal words above the stack start. */
+ pucStackMemory = pvPortMalloc( ulSecureStackSize + securecontextSTACK_SEAL_SIZE );
+
+ if( pucStackMemory != NULL )
+ {
+ /* Since stack grows down, the starting point will be the last
+ * location. Note that this location is next to the last
+ * allocated byte for stack (excluding the space for seal values)
+ * because the hardware decrements the stack pointer before
+ * writing i.e. if stack pointer is 0x2, a push operation will
+ * decrement the stack pointer to 0x1 and then write at 0x1. */
+ xSecureContexts[ ulSecureContextIndex ].pucStackStart = pucStackMemory + ulSecureStackSize;
+
+ /* Seal the created secure process stack. */
+ *( uint32_t * )( pucStackMemory + ulSecureStackSize ) = securecontextSTACK_SEAL_VALUE;
+ *( uint32_t * )( pucStackMemory + ulSecureStackSize + 4 ) = securecontextSTACK_SEAL_VALUE;
+
+ /* The stack cannot go beyond this location. This value is
+ * programmed in the PSPLIM register on context switch.*/
+ xSecureContexts[ ulSecureContextIndex ].pucStackLimit = pucStackMemory;
+
+ xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = pvTaskHandle;
+
+ #if ( configENABLE_MPU == 1 )
+ {
+ /* Store the correct CONTROL value for the task on the stack.
+ * This value is programmed in the CONTROL register on
+ * context switch. */
+ pulCurrentStackPointer = ( uint32_t * ) xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+ pulCurrentStackPointer--;
+
+ if( ulIsTaskPrivileged )
+ {
+ *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED;
+ }
+ else
+ {
+ *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED;
+ }
+
+ /* Store the current stack pointer. This value is programmed in
+ * the PSP register on context switch. */
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer;
+ }
+ #else /* configENABLE_MPU */
+ {
+ /* Current SP is set to the starting of the stack. This
+ * value is programmed in the PSP register on context switch. */
+ xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+ }
+ #endif /* configENABLE_MPU */
+
+ /* Ensure to never return 0 as a valid context handle. */
+ xSecureContextHandle = ulSecureContextIndex + 1UL;
+ }
+ }
+ }
+
+ return xSecureContextHandle;
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint32_t ulIPSR, ulSecureContextIndex;
+
+ /* Read the Interrupt Program Status Register (IPSR) value. */
+ secureportREAD_IPSR( ulIPSR );
+
+ /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+ * when the processor is running in the Thread Mode. */
+ if( ulIPSR != 0 )
+ {
+ /* Only free if a valid context handle is passed. */
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ /* Handles are 1-based; convert to a pool index. */
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ /* Ensure that the secure context being deleted is associated with
+ * the task. */
+ if( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle )
+ {
+ /* Free the stack space. pucStackLimit holds the address
+ * originally returned by pvPortMalloc(). */
+ vPortFree( xSecureContexts[ ulSecureContextIndex ].pucStackLimit );
+
+ /* Return the secure context back to the free secure contexts pool. */
+ vReturnSecureContext( ulSecureContextIndex );
+ }
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint8_t * pucStackLimit;
+ uint32_t ulSecureContextIndex;
+
+ /* Only act on a valid (1-based) context handle. */
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Ensure that no secure context is loaded and the task is loading its
+ * own context. PSPLIM equals securecontextNO_STACK only when nothing
+ * is loaded. */
+ if( ( pucStackLimit == securecontextNO_STACK ) &&
+ ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) )
+ {
+ SecureContext_LoadContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) );
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+ uint8_t * pucStackLimit;
+ uint32_t ulSecureContextIndex;
+
+ /* Only act on a valid (1-based) context handle. */
+ if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+ {
+ ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+ secureportREAD_PSPLIM( pucStackLimit );
+
+ /* Ensure that task's context is loaded and the task is saving its own
+ * context. PSPLIM matching this context's stack limit proves the
+ * context is the one currently loaded. */
+ if( ( xSecureContexts[ ulSecureContextIndex ].pucStackLimit == pucStackLimit ) &&
+ ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) )
+ {
+ SecureContext_SaveContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) );
+ }
+ }
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM85/secure/secure_context.h b/Source/portable/IAR/ARM_CM85/secure/secure_context.h
new file mode 100644
index 0000000..2220ea6
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/secure/secure_context.h
@@ -0,0 +1,135 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_CONTEXT_H__
+#define __SECURE_CONTEXT_H__
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* FreeRTOS includes. */
+#include "FreeRTOSConfig.h"
+
+/**
+ * @brief PSP and PSPLIM value when no secure context is loaded.
+ */
+#define securecontextNO_STACK 0x0
+
+/**
+ * @brief Invalid context ID.
+ *
+ * Returned by SecureContext_AllocateContext() on failure; valid handles are
+ * in the range 1 to secureconfigMAX_SECURE_CONTEXTS.
+ */
+#define securecontextINVALID_CONTEXT_ID 0UL
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Structure to represent a secure context.
+ *
+ * @note Since stack grows down, pucStackStart is the highest address while
+ * pucStackLimit is the first address of the allocated memory.
+ */
+typedef struct SecureContext
+{
+ uint8_t * pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */
+ uint8_t * pucStackLimit; /**< Last location of the stack memory (PSPLIM). */
+ uint8_t * pucStackStart; /**< First location of the stack memory. */
+ void * pvTaskHandle; /**< Task handle of the task this context is associated with. */
+} SecureContext_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Opaque handle for a secure context.
+ *
+ * A handle is the context's pool index plus one, so that 0 can serve as the
+ * invalid value (securecontextINVALID_CONTEXT_ID).
+ */
+typedef uint32_t SecureContextHandle_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Initializes the secure context management system.
+ *
+ * PSP is set to NULL and therefore a task must allocate and load a context
+ * before calling any secure side function in the thread mode.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureContext_Init( void );
+
+/**
+ * @brief Allocates a context on the secure side.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] ulSecureStackSize Size of the stack to allocate on secure side.
+ * @param[in] ulIsTaskPrivileged 1 if the calling task is privileged, 0 otherwise.
+ * @param[in] pvTaskHandle Handle of the task for which the context is allocated.
+ *
+ * @return Opaque context handle if context is successfully allocated,
+ * securecontextINVALID_CONTEXT_ID otherwise.
+ */
+#if ( configENABLE_MPU == 1 )
+ SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ uint32_t ulIsTaskPrivileged,
+ void * pvTaskHandle );
+#else /* configENABLE_MPU */
+ SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize,
+ void * pvTaskHandle );
+#endif /* configENABLE_MPU */
+
+/**
+ * @brief Frees the given context.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] xSecureContextHandle Context handle corresponding to the
+ * context to be freed.
+ * @param[in] pvTaskHandle Handle of the task owning the context; the context
+ * is freed only if this matches the owner recorded at allocation.
+ */
+void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle );
+
+/**
+ * @brief Loads the given context.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] xSecureContextHandle Context handle corresponding to the context
+ * to be loaded.
+ * @param[in] pvTaskHandle Handle of the task owning the context; the context
+ * is loaded only if this matches the recorded owner.
+ */
+void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle );
+
+/**
+ * @brief Saves the given context.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ *
+ * @param[in] xSecureContextHandle Context handle corresponding to the context
+ * to be saved.
+ * @param[in] pvTaskHandle Handle of the task owning the context; the context
+ * is saved only if this matches the recorded owner.
+ */
+void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle );
+
+#endif /* __SECURE_CONTEXT_H__ */
diff --git a/Source/portable/IAR/ARM_CM85/secure/secure_context_port_asm.s b/Source/portable/IAR/ARM_CM85/secure/secure_context_port_asm.s
new file mode 100644
index 0000000..0da3e0f
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/secure/secure_context_port_asm.s
@@ -0,0 +1,86 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+ SECTION .text:CODE:NOROOT(2)
+ THUMB
+
+/* Including FreeRTOSConfig.h here will cause build errors if the header file
+contains code not understood by the assembler - for example the 'extern' keyword.
+To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so
+the code is included in C files but excluded by the preprocessor in assembly
+files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
+#include "FreeRTOSConfig.h"
+
+ PUBLIC SecureContext_LoadContextAsm
+ PUBLIC SecureContext_SaveContextAsm
+/*-----------------------------------------------------------*/
+
+SecureContext_LoadContextAsm:
+ /* pxSecureContext value is in r0. */
+ mrs r1, ipsr /* r1 = IPSR. */
+ cbz r1, load_ctx_thread_mode /* Do nothing if the processor is running in the Thread Mode. */
+ ldmia r0!, {r1, r2} /* r1 = pxSecureContext->pucCurrentStackPointer, r2 = pxSecureContext->pucStackLimit. */
+
+#if ( configENABLE_MPU == 1 )
+ ldmia r1!, {r3} /* Read CONTROL register value from task's stack. r3 = CONTROL. */
+ msr control, r3 /* CONTROL = r3. */
+#endif /* configENABLE_MPU */
+
+ msr psplim, r2 /* PSPLIM = r2. */
+ msr psp, r1 /* PSP = r1. */
+
+ load_ctx_thread_mode:
+ bx lr
+/*-----------------------------------------------------------*/
+
+SecureContext_SaveContextAsm:
+ /* pxSecureContext value is in r0. */
+ mrs r1, ipsr /* r1 = IPSR. */
+ cbz r1, save_ctx_thread_mode /* Do nothing if the processor is running in the Thread Mode. */
+ mrs r1, psp /* r1 = PSP. */
+
+#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ vstmdb r1!, {s0} /* Trigger the deferred stacking of FPU registers. */
+ vldmia r1!, {s0} /* Nullify the effect of the previous statement. */
+#endif /* configENABLE_FPU || configENABLE_MVE */
+
+#if ( configENABLE_MPU == 1 )
+ mrs r2, control /* r2 = CONTROL. */
+ stmdb r1!, {r2} /* Store CONTROL value on the stack. */
+#endif /* configENABLE_MPU */
+
+ str r1, [r0] /* Save the top of stack in context. pxSecureContext->pucCurrentStackPointer = r1. */
+ movs r1, #0 /* r1 = securecontextNO_STACK. */
+ msr psplim, r1 /* PSPLIM = securecontextNO_STACK. */
+ msr psp, r1 /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */
+
+ save_ctx_thread_mode:
+ bx lr
+
+ END
diff --git a/Source/portable/IAR/ARM_CM85/secure/secure_heap.c b/Source/portable/IAR/ARM_CM85/secure/secure_heap.c
new file mode 100644
index 0000000..19f7c23
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/secure/secure_heap.c
@@ -0,0 +1,454 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Secure context heap includes. */
+#include "secure_heap.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief Total heap size.
+ *
+ * Defaults to 10 KB. Define secureconfigTOTAL_HEAP_SIZE in the build to
+ * change the size of the secure-side heap.
+ */
+#ifndef secureconfigTOTAL_HEAP_SIZE
+    #define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) )
+#endif
+
+/* No test marker by default. Can be overridden to record that an
+ * otherwise-empty branch was taken during coverage analysis. */
+#ifndef mtCOVERAGE_TEST_MARKER
+    #define mtCOVERAGE_TEST_MARKER()
+#endif
+
+/* No tracing by default. Can be overridden to trace allocations. */
+#ifndef traceMALLOC
+    #define traceMALLOC( pvReturn, xWantedSize )
+#endif
+
+/* No tracing by default. Can be overridden to trace frees. */
+#ifndef traceFREE
+    #define traceFREE( pv, xBlockSize )
+#endif
+
+/* Block sizes must not get too small: a split-off remainder must at least be
+ * able to hold a BlockLink_t header plus an equal amount of usable space. */
+#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) )
+
+/* Assumes 8bit bytes! */
+#define secureheapBITS_PER_BYTE ( ( size_t ) 8 )
+/*-----------------------------------------------------------*/
+
+/* Allocate the memory for the heap. */
+#if ( configAPPLICATION_ALLOCATED_HEAP == 1 )
+
+/* The application writer has already defined the array used for the RTOS
+* heap - probably so it can be placed in a special segment or address. */
+    extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+#else /* configAPPLICATION_ALLOCATED_HEAP */
+    static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+#endif /* configAPPLICATION_ALLOCATED_HEAP */
+
+/**
+ * @brief The linked list structure.
+ *
+ * This is used to link free blocks in order of their memory address.
+ * The header is also placed at the start of every allocated block; for an
+ * allocated block pxNextFreeBlock is set to NULL and the top bit of
+ * xBlockSize is set (see xBlockAllocatedBit below).
+ */
+typedef struct A_BLOCK_LINK
+{
+    struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */
+    size_t xBlockSize;                     /**< The size of the free block. */
+} BlockLink_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Called automatically to setup the required heap structures the first
+ * time pvPortMalloc() is called.
+ */
+static void prvHeapInit( void );
+
+/**
+ * @brief Inserts a block of memory that is being freed into the correct
+ * position in the list of free memory blocks.
+ *
+ * The block being freed will be merged with the block in front of it and/or
+ * the block behind it if the memory blocks are adjacent to each other.
+ *
+ * @param[in] pxBlockToInsert The block being freed.
+ */
+static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert );
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The size of the structure placed at the beginning of each allocated
+ * memory block must be correctly byte aligned.
+ *
+ * sizeof( BlockLink_t ) rounded up to the next secureportBYTE_ALIGNMENT
+ * boundary.
+ */
+static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( secureportBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+
+/**
+ * @brief Create a couple of list links to mark the start and end of the list.
+ *
+ * pxEnd is also used as the "heap initialised" flag: it stays NULL until
+ * prvHeapInit() runs.
+ */
+static BlockLink_t xStart;
+static BlockLink_t * pxEnd = NULL;
+
+/**
+ * @brief Keeps track of the number of free bytes remaining, but says nothing
+ * about fragmentation.
+ */
+static size_t xFreeBytesRemaining = 0U;
+static size_t xMinimumEverFreeBytesRemaining = 0U;
+
+/**
+ * @brief Gets set to the top bit of a size_t type.
+ *
+ * When this bit in the xBlockSize member of a BlockLink_t structure is set
+ * then the block belongs to the application. When the bit is free the block is
+ * still part of the free heap space.
+ */
+static size_t xBlockAllocatedBit = 0;
+/*-----------------------------------------------------------*/
+
+static void prvHeapInit( void )
+{
+    BlockLink_t * pxFirstFreeBlock;
+    uint8_t * pucAlignedHeap;
+    size_t uxAddress;
+    size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE;
+
+    /* Ensure the heap starts on a correctly aligned boundary. */
+    uxAddress = ( size_t ) ucHeap;
+
+    if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 )
+    {
+        /* Round the start address up to the next alignment boundary and
+         * shrink the usable size by the number of bytes skipped. */
+        uxAddress += ( secureportBYTE_ALIGNMENT - 1 );
+        uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+        xTotalHeapSize -= uxAddress - ( size_t ) ucHeap;
+    }
+
+    pucAlignedHeap = ( uint8_t * ) uxAddress;
+
+    /* xStart is used to hold a pointer to the first item in the list of free
+     * blocks. The void cast is used to prevent compiler warnings. */
+    xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
+    xStart.xBlockSize = ( size_t ) 0;
+
+    /* pxEnd is used to mark the end of the list of free blocks and is inserted
+     * at the end of the heap space. It is rounded DOWN so the marker itself
+     * is aligned. */
+    uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize;
+    uxAddress -= xHeapStructSize;
+    uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+    pxEnd = ( void * ) uxAddress;
+    pxEnd->xBlockSize = 0;
+    pxEnd->pxNextFreeBlock = NULL;
+
+    /* To start with there is a single free block that is sized to take up the
+     * entire heap space, minus the space taken by pxEnd. */
+    pxFirstFreeBlock = ( void * ) pucAlignedHeap;
+    pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock;
+    pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
+
+    /* Only one block exists - and it covers the entire usable heap space. */
+    xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+    xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+
+    /* Work out the position of the top bit in a size_t variable. */
+    xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 );
+}
+/*-----------------------------------------------------------*/
+
+static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert )
+{
+    BlockLink_t * pxIterator;
+    uint8_t * puc;
+
+    /* Iterate through the list until a block is found that has a higher address
+     * than the block being inserted. The list is kept in ascending address
+     * order precisely so adjacent blocks can be coalesced here. */
+    for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
+    {
+        /* Nothing to do here, just iterate to the right position. */
+    }
+
+    /* Do the block being inserted, and the block it is being inserted after
+     * make a contiguous block of memory? */
+    puc = ( uint8_t * ) pxIterator;
+
+    if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
+    {
+        /* Merge with the preceding block by growing it; from here on the
+         * merged block is the one being inserted. */
+        pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
+        pxBlockToInsert = pxIterator;
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    /* Do the block being inserted, and the block it is being inserted before
+     * make a contiguous block of memory? */
+    puc = ( uint8_t * ) pxBlockToInsert;
+
+    if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
+    {
+        if( pxIterator->pxNextFreeBlock != pxEnd )
+        {
+            /* Form one big block from the two blocks. */
+            pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
+            pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
+        }
+        else
+        {
+            /* Never merge with the end marker - just link to it. */
+            pxBlockToInsert->pxNextFreeBlock = pxEnd;
+        }
+    }
+    else
+    {
+        pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
+    }
+
+    /* If the block being inserted plugged a gap, so was merged with the block
+     * before and the block after, then its pxNextFreeBlock pointer will have
+     * already been set, and should not be set here as that would make it point
+     * to itself. */
+    if( pxIterator != pxBlockToInsert )
+    {
+        pxIterator->pxNextFreeBlock = pxBlockToInsert;
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+}
+/*-----------------------------------------------------------*/
+
+void * pvPortMalloc( size_t xWantedSize )
+{
+    BlockLink_t * pxBlock;
+    BlockLink_t * pxPreviousBlock;
+    BlockLink_t * pxNewBlockLink;
+    void * pvReturn = NULL;
+
+    /* If this is the first call to malloc then the heap will require
+     * initialisation to setup the list of free blocks. pxEnd doubles as the
+     * "already initialised" flag. */
+    if( pxEnd == NULL )
+    {
+        prvHeapInit();
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    /* Check the requested block size is not so large that the top bit is set.
+     * The top bit of the block size member of the BlockLink_t structure is used
+     * to determine who owns the block - the application or the kernel, so it
+     * must be free. */
+    if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
+    {
+        /* The wanted size is increased so it can contain a BlockLink_t
+         * structure in addition to the requested amount of bytes. */
+        if( xWantedSize > 0 )
+        {
+            xWantedSize += xHeapStructSize;
+
+            /* Ensure that blocks are always aligned to the required number of
+             * bytes. */
+            if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 )
+            {
+                /* Byte alignment required - round up to the next boundary. */
+                xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) );
+                secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 );
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
+        {
+            /* Traverse the list from the start (lowest address) block until
+             * one of adequate size is found (first-fit). */
+            pxPreviousBlock = &xStart;
+            pxBlock = xStart.pxNextFreeBlock;
+
+            while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
+            {
+                pxPreviousBlock = pxBlock;
+                pxBlock = pxBlock->pxNextFreeBlock;
+            }
+
+            /* If the end marker was reached then a block of adequate size was
+             * not found. */
+            if( pxBlock != pxEnd )
+            {
+                /* Return the memory space pointed to - jumping over the
+                 * BlockLink_t structure at its start. */
+                pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize );
+
+                /* This block is being returned for use so must be taken out
+                 * of the list of free blocks. */
+                pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
+
+                /* If the block is larger than required it can be split into
+                 * two. */
+                if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE )
+                {
+                    /* This block is to be split into two. Create a new
+                     * block following the number of bytes requested. The void
+                     * cast is used to prevent byte alignment warnings from the
+                     * compiler. */
+                    pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
+                    secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 );
+
+                    /* Calculate the sizes of two blocks split from the single
+                     * block. */
+                    pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
+                    pxBlock->xBlockSize = xWantedSize;
+
+                    /* Insert the new block into the list of free blocks. */
+                    prvInsertBlockIntoFreeList( pxNewBlockLink );
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+
+                xFreeBytesRemaining -= pxBlock->xBlockSize;
+
+                /* Track the low-water mark of free heap space. */
+                if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
+                {
+                    xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+
+                /* The block is being returned - it is allocated and owned by
+                 * the application and has no "next" block. */
+                pxBlock->xBlockSize |= xBlockAllocatedBit;
+                pxBlock->pxNextFreeBlock = NULL;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    traceMALLOC( pvReturn, xWantedSize );
+
+    #if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 )
+    {
+        if( pvReturn == NULL )
+        {
+            extern void vApplicationMallocFailedHook( void );
+            vApplicationMallocFailedHook();
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+    #endif /* if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) */
+
+    secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 );
+    return pvReturn;
+}
+/*-----------------------------------------------------------*/
+
+void vPortFree( void * pv )
+{
+    uint8_t * puc = ( uint8_t * ) pv;
+    BlockLink_t * pxLink;
+
+    /* Freeing NULL is a harmless no-op, matching standard free() semantics. */
+    if( pv != NULL )
+    {
+        /* The memory being freed will have an BlockLink_t structure immediately
+         * before it. */
+        puc -= xHeapStructSize;
+
+        /* This casting is to keep the compiler from issuing warnings. */
+        pxLink = ( void * ) puc;
+
+        /* Check the block is actually allocated: the allocated bit must be
+         * set and the next pointer NULL. Catches double-free and freeing of
+         * pointers that did not come from pvPortMalloc(). */
+        secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
+        secureportASSERT( pxLink->pxNextFreeBlock == NULL );
+
+        if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
+        {
+            if( pxLink->pxNextFreeBlock == NULL )
+            {
+                /* The block is being returned to the heap - it is no longer
+                 * allocated. */
+                pxLink->xBlockSize &= ~xBlockAllocatedBit;
+
+                /* Masking non-secure interrupts prevents a context switch
+                 * while the free list is being modified. */
+                secureportDISABLE_NON_SECURE_INTERRUPTS();
+                {
+                    /* Add this block to the list of free blocks. */
+                    xFreeBytesRemaining += pxLink->xBlockSize;
+                    traceFREE( pv, pxLink->xBlockSize );
+                    prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
+                }
+                secureportENABLE_NON_SECURE_INTERRUPTS();
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+}
+/*-----------------------------------------------------------*/
+
+size_t xPortGetFreeHeapSize( void )
+{
+    /* Report the number of bytes currently available in the secure heap.
+     * Says nothing about fragmentation. */
+    size_t xReturn;
+
+    xReturn = xFreeBytesRemaining;
+
+    return xReturn;
+}
+
+size_t xPortGetMinimumEverFreeHeapSize( void )
+{
+    /* Report the low-water mark of free heap space - the smallest amount of
+     * free heap that has ever existed since boot. */
+    size_t xReturn;
+
+    xReturn = xMinimumEverFreeBytesRemaining;
+
+    return xReturn;
+}
diff --git a/Source/portable/IAR/ARM_CM85/secure/secure_heap.h b/Source/portable/IAR/ARM_CM85/secure/secure_heap.h
new file mode 100644
index 0000000..75c9cb0
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/secure/secure_heap.h
@@ -0,0 +1,66 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* NOTE(review): identifiers starting with a double underscore are reserved
+ * by the C standard; consider renaming the guard to SECURE_HEAP_H. */
+#ifndef __SECURE_HEAP_H__
+#define __SECURE_HEAP_H__
+
+/* Standard includes. */
+#include <stdlib.h>
+
+/**
+ * @brief Allocates memory from heap.
+ *
+ * @param[in] xWantedSize The size of the memory to be allocated.
+ *
+ * @return Pointer to the memory region if the allocation is successful, NULL
+ * otherwise. The caller owns the returned memory and must release it with
+ * vPortFree().
+ */
+void * pvPortMalloc( size_t xWantedSize );
+
+/**
+ * @brief Frees the previously allocated memory.
+ *
+ * @param[in] pv Pointer to the memory to be freed. Must have been returned
+ * by pvPortMalloc(); NULL is a harmless no-op.
+ */
+void vPortFree( void * pv );
+
+/**
+ * @brief Get the free heap size.
+ *
+ * @return Free heap size in bytes.
+ */
+size_t xPortGetFreeHeapSize( void );
+
+/**
+ * @brief Get the minimum ever free heap size.
+ *
+ * @return Minimum ever free heap size in bytes (low-water mark).
+ */
+size_t xPortGetMinimumEverFreeHeapSize( void );
+
+#endif /* __SECURE_HEAP_H__ */
diff --git a/Source/portable/IAR/ARM_CM85/secure/secure_init.c b/Source/portable/IAR/ARM_CM85/secure/secure_init.c
new file mode 100644
index 0000000..f93bfce
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/secure/secure_init.c
@@ -0,0 +1,106 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Secure init includes. */
+#include "secure_init.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ *
+ * PRIS = 1 de-prioritizes non-secure exceptions relative to secure ones.
+ * Writes to AIRCR take effect only when accompanied by the VECTKEY value.
+ */
+#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */
+#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL )
+#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS )
+#define secureinitSCB_AIRCR_PRIS_POS ( 14UL )
+#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS )
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ *
+ * LSPENS gates non-secure writes to LSPEN; TS treats FP registers as secure.
+ */
+#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define secureinitFPCCR_LSPENS_POS ( 29UL )
+#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS )
+#define secureinitFPCCR_TS_POS ( 26UL )
+#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS )
+
+/* CP10/CP11 bits grant non-secure access to the FPU coprocessors. */
+#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */
+#define secureinitNSACR_CP10_POS ( 10UL )
+#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS )
+#define secureinitNSACR_CP11_POS ( 11UL )
+#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS )
+
+secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void )
+{
+    uint32_t ulIPSR;
+    uint32_t ulAircrValue;
+
+    /* Read the Interrupt Program Status Register (IPSR) value. */
+    secureportREAD_IPSR( ulIPSR );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode. */
+    if( ulIPSR != 0 )
+    {
+        /* Read-modify-write AIRCR: set PRIS to 1 so that non-secure
+         * exceptions are de-prioritized with respect to secure ones. The
+         * write is accepted only when the 0x05FA VECTKEY accompanies it. */
+        ulAircrValue = *( secureinitSCB_AIRCR );
+        ulAircrValue &= ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK );
+        ulAircrValue |= ( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK );
+        ulAircrValue |= ( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK );
+        *( secureinitSCB_AIRCR ) = ulAircrValue;
+    }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void )
+{
+    uint32_t ulIPSR;
+
+    /* Read the Interrupt Program Status Register (IPSR) value. */
+    secureportREAD_IPSR( ulIPSR );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode. */
+    if( ulIPSR != 0 )
+    {
+        /* CP10 = 1 ==> Non-secure access to the Floating Point Unit is
+         * permitted. CP11 should be programmed to the same value as CP10. */
+        *( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK );
+
+        /* LSPENS = 0 ==> LSPEN is writable from non-secure state. This ensures
+         * that we can enable/disable lazy stacking in port.c file. */
+        *( secureinitFPCCR ) &= ~( secureinitFPCCR_LSPENS_MASK );
+
+        /* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP
+         * registers (S16-S31) are also pushed to stack on exception entry and
+         * restored on exception return. */
+        *( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK );
+    }
+}
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM85/secure/secure_init.h b/Source/portable/IAR/ARM_CM85/secure/secure_init.h
new file mode 100644
index 0000000..e6c9da0
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/secure/secure_init.h
@@ -0,0 +1,54 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* NOTE(review): identifiers starting with a double underscore are reserved
+ * by the C standard; consider renaming the guard to SECURE_INIT_H. */
+#ifndef __SECURE_INIT_H__
+#define __SECURE_INIT_H__
+
+/**
+ * @brief De-prioritizes the non-secure exceptions.
+ *
+ * This is needed to ensure that the non-secure PendSV runs at the lowest
+ * priority. Context switch is done in the non-secure PendSV handler.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureInit_DePrioritizeNSExceptions( void );
+
+/**
+ * @brief Sets up the Floating Point Unit (FPU) for Non-Secure access.
+ *
+ * Also sets FPCCR.TS=1 to ensure that the content of the Floating Point
+ * Registers are not leaked to the non-secure side.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureInit_EnableNSFPUAccess( void );
+
+#endif /* __SECURE_INIT_H__ */
diff --git a/Source/portable/IAR/ARM_CM85/secure/secure_port_macros.h b/Source/portable/IAR/ARM_CM85/secure/secure_port_macros.h
new file mode 100644
index 0000000..d7ac583
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85/secure/secure_port_macros.h
@@ -0,0 +1,140 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_PORT_MACROS_H__
+#define __SECURE_PORT_MACROS_H__
+
+/**
+ * @brief Byte alignment requirements.
+ *
+ * All heap blocks and stack pointers on this port are 8-byte aligned; the
+ * mask selects the low three address bits for alignment checks.
+ */
+#define secureportBYTE_ALIGNMENT 8
+#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 )
+
+/**
+ * @brief Macro to declare a function as non-secure callable.
+ *
+ * IAR uses __cmse_nonsecure_entry with __root to keep the entry from being
+ * discarded by the linker; other compilers use the CMSE attribute form.
+ */
+#if defined( __IAR_SYSTEMS_ICC__ )
+    #define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry __root
+#else
+    #define secureportNON_SECURE_CALLABLE __attribute__( ( cmse_nonsecure_entry ) ) __attribute__( ( used ) )
+#endif
+
+/**
+ * @brief Set the secure PRIMASK value.
+ */
+#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \
+    __asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" )
+
+/**
+ * @brief Set the non-secure PRIMASK value.
+ *
+ * Uses the banked primask_ns register, accessible from the secure state.
+ */
+#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \
+    __asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" )
+
+/**
+ * @brief Read the PSP value in the given variable.
+ */
+#define secureportREAD_PSP( pucOutCurrentStackPointer ) \
+    __asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) )
+
+/**
+ * @brief Set the PSP to the given value.
+ */
+#define secureportSET_PSP( pucCurrentStackPointer ) \
+    __asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) )
+
+/**
+ * @brief Read the PSPLIM value in the given variable.
+ */
+#define secureportREAD_PSPLIM( pucOutStackLimit ) \
+    __asm volatile ( "mrs %0, psplim" : "=r" ( pucOutStackLimit ) )
+
+/**
+ * @brief Set the PSPLIM to the given value.
+ */
+#define secureportSET_PSPLIM( pucStackLimit ) \
+    __asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) )
+
+/**
+ * @brief Set the NonSecure MSP to the given value.
+ */
+#define secureportSET_MSP_NS( pucMainStackPointer ) \
+    __asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) )
+
+/**
+ * @brief Set the CONTROL register to the given value.
+ */
+#define secureportSET_CONTROL( ulControl ) \
+    __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" )
+
+/**
+ * @brief Read the Interrupt Program Status Register (IPSR) value in the given
+ * variable.
+ *
+ * IPSR is zero in Thread Mode and holds the active exception number in
+ * Handler Mode - used throughout the secure side as a mode check.
+ */
+#define secureportREAD_IPSR( ulIPSR ) \
+    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) )
+
+/**
+ * @brief PRIMASK value to enable interrupts.
+ */
+#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0
+
+/**
+ * @brief PRIMASK value to disable interrupts.
+ */
+#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1
+
+/**
+ * @brief Disable secure interrupts.
+ */
+#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Disable non-secure interrupts.
+ *
+ * This effectively disables context switches, because the non-secure PendSV
+ * (which performs the switch) is masked along with all other non-secure
+ * interrupts.
+ */
+#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Enable non-secure interrupts.
+ */
+#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL )
+
+/**
+ * @brief Assert definition.
+ */
+#define secureportASSERT( x ) \
+ if( ( x ) == 0 ) \
+ { \
+ secureportDISABLE_SECURE_INTERRUPTS(); \
+ secureportDISABLE_NON_SECURE_INTERRUPTS(); \
+ for( ; ; ) {; } \
+ }
+
+#endif /* __SECURE_PORT_MACROS_H__ */
diff --git a/Source/portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S b/Source/portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..ef180bd
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1336 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+#include "mpu_syscall_numbers.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskDelayUntil
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskAbortDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskDelay
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskPriorityGet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_eTaskGetState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskGetInfo
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSuspend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskResume
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetTickCount
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTaskGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetSystemState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetSchedulerState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetTimeOutState
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskCheckForTimeOut
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyEntry
+MPU_xTaskGenericNotifyEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotify
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWaitEntry
+MPU_xTaskGenericNotifyWaitEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyWait
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGenericSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueMessagesWaiting
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueuePeek
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSemaphoreTake
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGetMutexHolder
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueTakeMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGiveMutexRecursive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSelectFromSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueAddToSet
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueAddToRegistry
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueUnregisterQueue
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcQueueGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pvTimerGetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetTimerID
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerIsTimerActive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommandEntry
+MPU_xTimerGenericCommandEntry:
+ push {r0}
+ /* This function can be called from ISR also and therefore, we need a check
+ * to take privileged path, if called from ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGenericCommand
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandPrivImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_pcTimerGetName
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxTimerGetReloadMode
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetPeriod
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetExpiryTime
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBitsEntry
+MPU_xEventGroupWaitBitsEntry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupWaitBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupClearBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSetBits
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSync
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_uxEventGroupGetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_vEventGroupSetNumber
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSend
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferReceive
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsFull
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsEmpty
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferBytesAvailable
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandPrivImpl
+MPU_xTimerGenericCommandPrivImpl:
+ b MPU_xTimerGenericCommandPrivImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/Source/portable/IAR/ARM_CM85_NTZ/non_secure/port.c b/Source/portable/IAR/ARM_CM85_NTZ/non_secure/port.c
new file mode 100644
index 0000000..9712ac3
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85_NTZ/non_secure/port.c
@@ -0,0 +1,2043 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+/* MPU includes. */
+#include "mpu_wrappers.h"
+#include "mpu_syscall_numbers.h"
+
+/* Portasm includes. */
+#include "portasm.h"
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ /* Secure components includes. */
+ #include "secure_context.h"
+ #include "secure_init.h"
+#endif /* configENABLE_TRUSTZONE */
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/**
+ * The FreeRTOS Cortex-M85 port can be configured to run on the Secure Side only
+ * i.e. the processor boots as secure and never jumps to the non-secure side.
+ * The TrustZone support in the port must be disabled in order to run FreeRTOS
+ * on the secure side. The following are the valid configuration settings:
+ *
+ * 1. Run FreeRTOS on the Secure Side:
+ * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
+ *
+ * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
+ *
+ * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
+ * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
+ */
+#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
+ #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the NVIC.
+ */
+#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
+#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
+#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
+#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
+#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
+#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
+#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
+#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
+#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
+#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
+#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 )
+#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to check the validity of an interrupt priority.
+ */
+#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
+#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
+#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
+#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
+#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
+#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
+#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
+#define portPRIGROUP_SHIFT ( 8UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
+#define portCPACR_CP10_VALUE ( 3UL )
+#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE
+#define portCPACR_CP10_POS ( 20UL )
+#define portCPACR_CP11_POS ( 22UL )
+
+#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
+#define portFPCCR_ASPEN_POS ( 31UL )
+#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS )
+#define portFPCCR_LSPEN_POS ( 30UL )
+#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to manipulate the MPU.
+ */
+#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
+#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
+#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
+
+#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
+#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
+
+#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) )
+#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) )
+
+#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) )
+#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) )
+
+#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) )
+#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) )
+
+#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) )
+#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) )
+
+#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
+#define portMPU_MAIR_ATTR0_POS ( 0UL )
+#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR1_POS ( 8UL )
+#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR2_POS ( 16UL )
+#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR3_POS ( 24UL )
+#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 )
+
+#define portMPU_MAIR_ATTR4_POS ( 0UL )
+#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff )
+
+#define portMPU_MAIR_ATTR5_POS ( 8UL )
+#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 )
+
+#define portMPU_MAIR_ATTR6_POS ( 16UL )
+#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 )
+
+#define portMPU_MAIR_ATTR7_POS ( 24UL )
+#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 )
+
+#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL )
+#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL )
+
+#define portMPU_RLAR_REGION_ENABLE ( 1UL )
+
+/* Enable privileged access to unmapped region. */
+#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL )
+
+/* Enable MPU. */
+#define portMPU_ENABLE_BIT ( 1UL << 0UL )
+
+/* Expected value of the portMPU_TYPE register. */
+#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The maximum 24-bit number.
+ *
+ * It is needed because the SysTick is a 24-bit counter.
+ */
+#define portMAX_24_BIT_NUMBER ( 0xffffffUL )
+
+/**
+ * @brief A fiddle factor to estimate the number of SysTick counts that would
+ * have occurred while the SysTick counter is stopped during tickless idle
+ * calculations.
+ */
+#define portMISSED_COUNTS_FACTOR ( 94UL )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Constants required to set up the initial stack.
+ */
+#define portINITIAL_XPSR ( 0x01000000 )
+
+#if ( configRUN_FREERTOS_SECURE_ONLY == 1 )
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF FD
+ * 1111 1111 1111 1111 1111 1111 1111 1101
+ *
+ * Bit[6] - 1 --> The exception was taken from the Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 1 --> The exception was taken to the Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xfffffffd )
+#else
+
+/**
+ * @brief Initial EXC_RETURN value.
+ *
+ * FF FF FF BC
+ * 1111 1111 1111 1111 1111 1111 1011 1100
+ *
+ * Bit[6] - 0 --> The exception was taken from the Non-Secure state.
+ * Bit[5] - 1 --> Do not skip stacking of additional state context.
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
+ * Bit[3] - 1 --> Return to the Thread mode.
+ * Bit[2] - 1 --> Restore registers from the process stack.
+ * Bit[1] - 0 --> Reserved, 0.
+ * Bit[0] - 0 --> The exception was taken to the Non-Secure state.
+ */
+ #define portINITIAL_EXC_RETURN ( 0xffffffbc )
+#endif /* configRUN_FREERTOS_SECURE_ONLY */
+
+/**
+ * @brief CONTROL register privileged bit mask.
+ *
+ * Bit[0] in CONTROL register tells the privilege:
+ * Bit[0] = 0 ==> The task is privileged.
+ * Bit[0] = 1 ==> The task is not privileged.
+ */
+#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL )
+
+/**
+ * @brief Initial CONTROL register values.
+ */
+#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 )
+#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 )
+
+/**
+ * @brief Let the user override the default SysTick clock rate. If defined by the
+ * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
+ * configuration register.
+ */
+#ifndef configSYSTICK_CLOCK_HZ
+ #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ )
+ /* Ensure the SysTick is clocked at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT )
+#else
+ /* Select the option to clock SysTick not at the same frequency as the core. */
+ #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 )
+#endif
+
+/**
+ * @brief Let the user override the pre-loading of the initial LR with the
+ * address of prvTaskExitError() in case it messes up unwinding of the stack
+ * in the debugger.
+ */
+#ifdef configTASK_RETURN_ADDRESS
+ #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
+#else
+ #define portTASK_RETURN_ADDRESS prvTaskExitError
+#endif
+
+/**
+ * @brief If portPRELOAD_REGISTERS then registers will be given an initial value
+ * when a task is created. This helps in debugging at the cost of code size.
+ */
+#define portPRELOAD_REGISTERS 1
+
+/**
+ * @brief A task is created without a secure context, and must call
+ * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes
+ * any secure calls.
+ */
+#define portNO_SECURE_CONTEXT 0
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Used to catch tasks that attempt to return from their implementing
+ * function.
+ */
+static void prvTaskExitError( void );
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Setup the Memory Protection Unit (MPU).
+ */
+ static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_FPU == 1 )
+
+/**
+ * @brief Setup the Floating Point Unit (FPU).
+ */
+ static void prvSetupFPU( void ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_FPU */
+
+/**
+ * @brief Setup the timer to generate the tick interrupts.
+ *
+ * The implementation in this file is weak to allow application writers to
+ * change the timer used to generate the tick interrupt.
+ */
+void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether the current execution context is interrupt.
+ *
+ * @return pdTRUE if the current execution context is interrupt, pdFALSE
+ * otherwise.
+ */
+BaseType_t xPortIsInsideInterrupt( void );
+
+/**
+ * @brief Yield the processor.
+ */
+void vPortYield( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enter critical section.
+ */
+void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Exit from critical section.
+ */
+void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SysTick handler.
+ */
+void SysTick_Handler( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief C part of SVC handler.
+ */
+portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/**
+ * @brief This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
+
+/**
+ * @brief Each task maintains its own interrupt status in the critical nesting
+ * variable.
+ */
+PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Saved as part of the task context to indicate which context the
+ * task is using on the secure side.
+ */
+ PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
+#endif /* configENABLE_TRUSTZONE */
+
+/**
+ * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+ static uint8_t ucMaxSysCallPriority = 0;
+ static uint32_t ulMaxPRIGROUPValue = 0;
+ static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+
+/**
+ * @brief The number of SysTick increments that make up one tick period.
+ */
+ PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;
+
+/**
+ * @brief The maximum number of tick periods that can be suppressed is
+ * limited by the 24 bit resolution of the SysTick timer.
+ */
+ PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;
+
+/**
+ * @brief Compensate for the CPU cycles that pass while the SysTick is
+ * stopped (low power functionality only).
+ */
+ PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TICKLESS_IDLE == 1 )
+ __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
+ {
+ uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
+ TickType_t xModifiableIdleTime;
+
+ /* Make sure the SysTick reload value does not overflow the counter. */
+ if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
+ {
+ xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
+ }
+
+ /* Enter a critical section but don't use the taskENTER_CRITICAL()
+ * method as that will mask interrupts that should exit sleep mode. */
+ __asm volatile ( "cpsid i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* If a context switch is pending or a task is waiting for the scheduler
+ * to be unsuspended then abandon the low power entry. */
+ if( eTaskConfirmSleepModeStatus() == eAbortSleep )
+ {
+ /* Re-enable interrupts - see comments above the cpsid instruction
+ * above. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ }
+ else
+ {
+ /* Stop the SysTick momentarily. The time the SysTick is stopped for
+ * is accounted for as best it can be, but using the tickless mode will
+ * inevitably result in some tiny drift of the time maintained by the
+ * kernel with respect to calendar time. */
+ portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+ /* Use the SysTick current-value register to determine the number of
+ * SysTick decrements remaining until the next tick interrupt. If the
+ * current-value register is zero, then there are actually
+ * ulTimerCountsForOneTick decrements remaining, not zero, because the
+ * SysTick requests the interrupt when decrementing from 1 to 0. */
+ ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+
+ if( ulSysTickDecrementsLeft == 0 )
+ {
+ ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
+ }
+
+ /* Calculate the reload value required to wait xExpectedIdleTime
+ * tick periods. -1 is used because this code normally executes part
+ * way through the first tick period. But if the SysTick IRQ is now
+ * pending, then clear the IRQ, suppressing the first tick, and correct
+ * the reload value to reflect that the second tick period is already
+ * underway. The expected idle time is always at least two ticks. */
+ ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
+
+ if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
+ {
+ portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
+ ulReloadValue -= ulTimerCountsForOneTick;
+ }
+
+ if( ulReloadValue > ulStoppedTimerCompensation )
+ {
+ ulReloadValue -= ulStoppedTimerCompensation;
+ }
+
+ /* Set the new reload value. */
+ portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
+
+ /* Clear the SysTick count flag and set the count value back to
+ * zero. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+ /* Restart SysTick. */
+ portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
+
+ /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
+ * set its parameter to 0 to indicate that its implementation contains
+ * its own wait for interrupt or wait for event instruction, and so wfi
+ * should not be executed again. However, the original expected idle
+ * time variable must remain unmodified, so a copy is taken. */
+ xModifiableIdleTime = xExpectedIdleTime;
+ configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
+
+ if( xModifiableIdleTime > 0 )
+ {
+ __asm volatile ( "dsb" ::: "memory" );
+ __asm volatile ( "wfi" );
+ __asm volatile ( "isb" );
+ }
+
+ configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
+
+ /* Re-enable interrupts to allow the interrupt that brought the MCU
+ * out of sleep mode to execute immediately. See comments above
+ * the cpsid instruction above. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* Disable interrupts again because the clock is about to be stopped
+ * and interrupts that execute while the clock is stopped will increase
+ * any slippage between the time maintained by the RTOS and calendar
+ * time. */
+ __asm volatile ( "cpsid i" ::: "memory" );
+ __asm volatile ( "dsb" );
+ __asm volatile ( "isb" );
+
+ /* Disable the SysTick clock without reading the
+ * portNVIC_SYSTICK_CTRL_REG register to ensure the
+ * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
+ * the time the SysTick is stopped for is accounted for as best it can
+ * be, but using the tickless mode will inevitably result in some tiny
+ * drift of the time maintained by the kernel with respect to calendar
+ * time. */
+ portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
+
+ /* Determine whether the SysTick has already counted to zero. */
+ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+ {
+ uint32_t ulCalculatedLoadValue;
+
+ /* The tick interrupt ended the sleep (or is now pending), and
+ * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG
+ * with whatever remains of the new tick period. */
+ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
+
+ /* Don't allow a tiny value, or values that have somehow
+ * underflowed because the post sleep hook did something
+ * that took too long or because the SysTick current-value register
+ * is zero. */
+ if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
+ {
+ ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
+ }
+
+ portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
+
+ /* As the pending tick will be processed as soon as this
+ * function exits, the tick value maintained by the tick is stepped
+ * forward by one less than the time spent waiting. */
+ ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
+ }
+ else
+ {
+ /* Something other than the tick interrupt ended the sleep. */
+
+ /* Use the SysTick current-value register to determine the
+ * number of SysTick decrements remaining until the expected idle
+ * time would have ended. */
+ ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
+ #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
+ {
+ /* If the SysTick is not using the core clock, the current-
+ * value register might still be zero here. In that case, the
+ * SysTick didn't load from the reload register, and there are
+ * ulReloadValue decrements remaining in the expected idle
+ * time, not zero. */
+ if( ulSysTickDecrementsLeft == 0 )
+ {
+ ulSysTickDecrementsLeft = ulReloadValue;
+ }
+ }
+ #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+ /* Work out how long the sleep lasted rounded to complete tick
+ * periods (not the ulReload value which accounted for part
+ * ticks). */
+ ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
+
+ /* How many complete tick periods passed while the processor
+ * was waiting? */
+ ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
+
+ /* The reload value is set to whatever fraction of a single tick
+ * period remains. */
+ portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
+ }
+
+ /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
+ * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If
+ * the SysTick is not using the core clock, temporarily configure it to
+ * use the core clock. This configuration forces the SysTick to load
+ * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
+ * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready
+ * to receive the standard value immediately. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+ #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
+ {
+ portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+ }
+ #else
+ {
+ /* The temporary usage of the core clock has served its purpose,
+ * as described above. Resume usage of the other clock. */
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;
+
+ if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
+ {
+ /* The partial tick period already ended. Be sure the SysTick
+ * counts it only once. */
+ portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
+ }
+
+ portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+ portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+ }
+ #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
+
+ /* Step the tick to account for any tick periods that elapsed. */
+ vTaskStepTick( ulCompleteTickPeriods );
+
+ /* Exit with interrupts enabled. */
+ __asm volatile ( "cpsie i" ::: "memory" );
+ }
+ }
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+/* Configure the SysTick timer to generate the RTOS tick interrupt.  Declared
+ * weak so an application can override it, for example to use a different
+ * timer as the tick source. */
+__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Calculate the constants required to configure the tick interrupt. */
+    #if ( configUSE_TICKLESS_IDLE == 1 )
+    {
+        /* Number of SysTick counts per RTOS tick period. */
+        ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
+        /* The SysTick counter is 24 bits wide (see portMAX_24_BIT_NUMBER),
+         * so this is the longest idle period, in ticks, that can be
+         * suppressed without the counter wrapping. */
+        xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
+        /* Compensation applied for the counts missed while the SysTick is
+         * stopped during the low power entry/exit decision. */
+        ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
+    }
+    #endif /* configUSE_TICKLESS_IDLE */
+
+    /* Stop and reset the SysTick. */
+    portNVIC_SYSTICK_CTRL_REG = 0UL;
+    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+    /* Configure SysTick to interrupt at the requested rate. */
+    portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
+    portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
+}
+/*-----------------------------------------------------------*/
+
+/* Trap reached if a task's implementing function returns.  A task must never
+ * return to its (non-existent) caller - a task that wants to end itself must
+ * call vTaskDelete( NULL ).  Trip configASSERT() (when defined) and then spin
+ * forever with interrupts disabled so the error is obvious in a debugger. */
+static void prvTaskExitError( void )
+{
+    volatile uint32_t ulSpinForever = 0UL;
+
+    configASSERT( ulCriticalNesting == ~0UL );
+    portDISABLE_INTERRUPTS();
+
+    for( ; ulSpinForever == 0UL ; )
+    {
+        /* This file calls prvTaskExitError() after the scheduler has been
+         * started, which removes a compiler warning about the function being
+         * defined but never called.  The volatile qualifier on
+         * ulSpinForever stops the compiler from proving this loop never
+         * exits, which would otherwise produce 'unreachable code' warnings
+         * for anything placed after a call to this function. */
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+    /* Translate the access-permission field of an MPU RBAR value into the
+     * tskMPU_* permission flags used by the kernel.  Encodings other than
+     * read-only and read-write map to 0 (no permission flags). */
+    static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulPermissions = 0;
+
+        switch( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK )
+        {
+            case portMPU_REGION_READ_ONLY:
+                ulPermissions = tskMPU_READ_PERMISSION;
+                break;
+
+            case portMPU_REGION_READ_WRITE:
+                ulPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+                break;
+
+            default:
+                /* Leave ulPermissions at 0 - no permission flags set. */
+                break;
+        }
+
+        return ulPermissions;
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+    /* Program the MPU regions that are common to all tasks: privileged
+     * flash (kernel code), unprivileged flash, the system call (syscalls)
+     * flash section, and the privileged kernel SRAM.  Region boundaries are
+     * taken from linker-script symbols.  Finally the memory fault exception
+     * is enabled and the MPU is switched on with privileged background
+     * access. */
+    static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
+    {
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of
+             * being exported from linker scripts. */
+            extern uint32_t * __privileged_functions_start__;
+            extern uint32_t * __privileged_functions_end__;
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+            extern uint32_t * __unprivileged_flash_start__;
+            extern uint32_t * __unprivileged_flash_end__;
+            extern uint32_t * __privileged_sram_start__;
+            extern uint32_t * __privileged_sram_end__;
+        #else /* if defined( __ARMCC_VERSION ) */
+            /* Declaration when these variables are exported from linker scripts. */
+            extern uint32_t __privileged_functions_start__[];
+            extern uint32_t __privileged_functions_end__[];
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+            extern uint32_t __unprivileged_flash_start__[];
+            extern uint32_t __unprivileged_flash_end__[];
+            extern uint32_t __privileged_sram_start__[];
+            extern uint32_t __privileged_sram_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+
+        /* The only permitted number of regions are 8 or 16. */
+        configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) );
+
+        /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */
+        configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );
+
+        /* Check that the MPU is present. */
+        if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
+        {
+            /* MAIR0 - Index 0 - normal (bufferable, cacheable) memory
+             * attributes, referenced below via portMPU_RLAR_ATTR_INDEX0. */
+            portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+            /* MAIR0 - Index 1 - device memory (nGnRE) attributes. */
+            portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+            /* Setup privileged flash as Read Only so that privileged tasks can
+             * read it but not modify. */
+            portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_PRIVILEGED_READ_ONLY );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Setup unprivileged flash as Read Only by both privileged and
+             * unprivileged tasks. All tasks can read it but no-one can modify. */
+            portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_READ_ONLY );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Setup unprivileged syscalls flash as Read Only by both privileged
+             * and unprivileged tasks. All tasks can read it but no-one can modify. */
+            portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_READ_ONLY );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Setup RAM containing kernel data for privileged access only.
+             * Marked execute-never so kernel data can never be executed. */
+            portMPU_RNR_REG = portPRIVILEGED_RAM_REGION;
+            portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
+                               ( portMPU_REGION_NON_SHAREABLE ) |
+                               ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
+                               ( portMPU_REGION_EXECUTE_NEVER );
+            portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
+                               ( portMPU_RLAR_ATTR_INDEX0 ) |
+                               ( portMPU_RLAR_REGION_ENABLE );
+
+            /* Enable mem fault. */
+            portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT;
+
+            /* Enable MPU with privileged background access i.e. unmapped
+             * regions have privileged access. */
+            portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
+        }
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_FPU == 1 )
+    /* Enable full (privileged and unprivileged) access to the FPU, and turn
+     * on automatic plus lazy stacking of the floating point context on
+     * exception entry/exit.  With TrustZone enabled, the secure side is
+     * first asked to grant the non-secure side access to the FPU. */
+    static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */
+    {
+        #if ( configENABLE_TRUSTZONE == 1 )
+        {
+            /* Enable non-secure access to the FPU. */
+            SecureInit_EnableNSFPUAccess();
+        }
+        #endif /* configENABLE_TRUSTZONE */
+
+        /* CP10 = 11 ==> Full access to FPU i.e. both privileged and
+         * unprivileged code should be able to access FPU. CP11 should be
+         * programmed to the same value as CP10. */
+        *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) |
+                            ( portCPACR_CP11_VALUE << portCPACR_CP11_POS )
+                            );
+
+        /* ASPEN = 1 ==> Hardware should automatically preserve floating point
+         * context on exception entry and restore on exception return.
+         * LSPEN = 1 ==> Enable lazy context save of FP state. */
+        *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK );
+    }
+#endif /* configENABLE_FPU */
+
+/* Request a context switch by pending the PendSV exception; the switch
+ * itself is performed when the PendSV handler runs. */
+void vPortYield( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Set a PendSV to request a context switch. */
+    portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+
+    /* Barriers are normally not required but do ensure the code is
+     * completely within the specified behaviour for the architecture. */
+    __asm volatile ( "dsb" ::: "memory" );
+    __asm volatile ( "isb" );
+}
+/*-----------------------------------------------------------*/
+
+/* Enter a critical section.  Critical sections nest: each call must be
+ * matched by a call to vPortExitCritical(), and interrupts stay masked
+ * until the outermost section is exited. */
+void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Mask interrupts before bumping the nesting count so the count can
+     * never be observed inconsistently. */
+    portDISABLE_INTERRUPTS();
+    ulCriticalNesting++;
+
+    /* Barriers are normally not required but do ensure the code is
+     * completely within the specified behaviour for the architecture. */
+    __asm volatile ( "dsb" ::: "memory" );
+    __asm volatile ( "isb" );
+}
+/*-----------------------------------------------------------*/
+
+/* Leave a critical section previously entered with vPortEnterCritical().
+ * Interrupts are re-enabled only when the outermost nested critical section
+ * is exited. */
+void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Exiting more critical sections than were entered is a caller bug. */
+    configASSERT( ulCriticalNesting );
+
+    if( --ulCriticalNesting == 0 )
+    {
+        portENABLE_INTERRUPTS();
+    }
+}
+/*-----------------------------------------------------------*/
+
+/* SysTick interrupt handler - advances the RTOS tick.  Kernel-aware
+ * interrupts are masked while the tick count is incremented. */
+void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
+{
+    uint32_t ulSavedInterruptMask;
+
+    ulSavedInterruptMask = portSET_INTERRUPT_MASK_FROM_ISR();
+
+    /* Increment the RTOS tick.  A non-pdFALSE return means a context switch
+     * is required, so pend the PendSV exception to perform it. */
+    if( xTaskIncrementTick() != pdFALSE )
+    {
+        portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+    }
+
+    portCLEAR_INTERRUPT_MASK_FROM_ISR( ulSavedInterruptMask );
+}
+/*-----------------------------------------------------------*/
+
+/* C portion of the SVC handler.  Decodes the SVC number from the SVC
+ * instruction that raised the exception and dispatches the requested kernel
+ * service: secure context allocate/free (TrustZone), scheduler start, or
+ * privilege raise (MPU wrappers v1).  pulCallerStackAddress points at the
+ * caller's exception stack frame. */
+void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
+{
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of
+             * being exported from linker scripts. */
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+        #else
+            /* Declaration when these variables are exported from linker scripts. */
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+    #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+    uint32_t ulPC;
+
+    #if ( configENABLE_TRUSTZONE == 1 )
+        uint32_t ulR0, ulR1;
+        extern TaskHandle_t pxCurrentTCB;
+        #if ( configENABLE_MPU == 1 )
+            uint32_t ulControl, ulIsTaskPrivileged;
+        #endif /* configENABLE_MPU */
+    #endif /* configENABLE_TRUSTZONE */
+    uint8_t ucSVCNumber;
+
+    /* Registers are stored on the stack in the following order - R0, R1, R2, R3,
+     * R12, LR, PC, xPSR. */
+    ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
+    /* The stacked PC points at the instruction after the SVC, so the SVC
+     * number (the instruction's immediate) is the byte two bytes before it. */
+    ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
+
+    switch( ucSVCNumber )
+    {
+        #if ( configENABLE_TRUSTZONE == 1 )
+            case portSVC_ALLOCATE_SECURE_CONTEXT:
+
+                /* R0 contains the stack size passed as parameter to the
+                 * vPortAllocateSecureContext function. */
+                ulR0 = pulCallerStackAddress[ 0 ];
+
+                #if ( configENABLE_MPU == 1 )
+                {
+                    /* Read the CONTROL register value. */
+                    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
+
+                    /* The task that raised the SVC is privileged if Bit[0]
+                     * in the CONTROL register is 0. */
+                    ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 );
+
+                    /* Allocate and load a context for the secure task. */
+                    xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB );
+                }
+                #else /* if ( configENABLE_MPU == 1 ) */
+                {
+                    /* Allocate and load a context for the secure task. */
+                    xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB );
+                }
+                #endif /* configENABLE_MPU */
+
+                configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID );
+                SecureContext_LoadContext( xSecureContext, pxCurrentTCB );
+                break;
+
+            case portSVC_FREE_SECURE_CONTEXT:
+
+                /* R0 contains TCB being freed and R1 contains the secure
+                 * context handle to be freed. */
+                ulR0 = pulCallerStackAddress[ 0 ];
+                ulR1 = pulCallerStackAddress[ 1 ];
+
+                /* Free the secure context. */
+                SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 );
+                break;
+        #endif /* configENABLE_TRUSTZONE */
+
+        case portSVC_START_SCHEDULER:
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                /* De-prioritize the non-secure exceptions so that the
+                 * non-secure pendSV runs at the lowest priority. */
+                SecureInit_DePrioritizeNSExceptions();
+
+                /* Initialize the secure context management system. */
+                SecureContext_Init();
+            }
+            #endif /* configENABLE_TRUSTZONE */
+
+            #if ( configENABLE_FPU == 1 )
+            {
+                /* Setup the Floating Point Unit (FPU). */
+                prvSetupFPU();
+            }
+            #endif /* configENABLE_FPU */
+
+            /* Setup the context of the first task so that the first task starts
+             * executing. */
+            vRestoreContextOfFirstTask();
+            break;
+
+        #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+            case portSVC_RAISE_PRIVILEGE:
+
+                /* Only raise the privilege, if the svc was raised from any of
+                 * the system calls. */
+                if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+                    ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+                {
+                    vRaisePrivilege();
+                }
+                break;
+        #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+        default:
+            /* Incorrect SVC call. */
+            configASSERT( pdFALSE );
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /* SVC handler helper used on system call entry (MPU wrappers v2).
+     * Copies the exception stack frame from the task stack to the task's
+     * dedicated system call stack, raises privilege for the duration of the
+     * system call, and arranges for vRequestSystemCallExit() to run when the
+     * system call implementation returns. */
+    void vSystemCallEnter( uint32_t * pulTaskStack,
+                           uint32_t ulLR,
+                           uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+    {
+        extern TaskHandle_t pxCurrentTCB;
+        extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+        xMPU_SETTINGS * pxMpuSettings;
+        uint32_t * pulSystemCallStack;
+        uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of
+             * being exported from linker scripts. */
+            extern uint32_t * __syscalls_flash_start__;
+            extern uint32_t * __syscalls_flash_end__;
+        #else
+            /* Declaration when these variables are exported from linker scripts. */
+            extern uint32_t __syscalls_flash_start__[];
+            extern uint32_t __syscalls_flash_end__[];
+        #endif /* #if defined( __ARMCC_VERSION ) */
+
+        ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+        /* Checks:
+         * 1. SVC is raised from the system call section (i.e. application is
+         *    not raising SVC directly).
+         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+         *    it is non-NULL only during the execution of a system call (i.e.
+         *    between system call enter and exit).
+         * 3. System call is not for a kernel API disabled by the configuration
+         *    in FreeRTOSConfig.h.
+         * 4. We do not need to check that ucSystemCallNumber is within range
+         *    because the assembly SVC handler checks that before calling
+         *    this function.
+         */
+        if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+            ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+            ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+        {
+            pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+            {
+                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+                {
+                    /* Extended frame i.e. FPU in use. */
+                    ulStackFrameSize = 26;
+                    __asm volatile
+                    (
+                        " vpush {s0}         \n" /* Trigger lazy stacking. */
+                        " vpop  {s0}         \n" /* Nullify the effect of the above instruction. */
+                        ::: "memory"
+                    );
+                }
+                else
+                {
+                    /* Standard frame i.e. FPU not in use. */
+                    ulStackFrameSize = 8;
+                }
+            }
+            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+            {
+                ulStackFrameSize = 8;
+            }
+            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+            /* Make space on the system call stack for the stack frame. */
+            pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+            /* Copy the stack frame. */
+            for( i = 0; i < ulStackFrameSize; i++ )
+            {
+                pulSystemCallStack[ i ] = pulTaskStack[ i ];
+            }
+
+            /* Store the value of the Link Register before the SVC was raised.
+             * It contains the address of the caller of the System Call entry
+             * point (i.e. the caller of the MPU_<API>). We need to restore it
+             * when we exit from the system call. */
+            pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+            /* Store the value of the PSPLIM register before the SVC was raised.
+             * We need to restore it when we exit from the system call. */
+            __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+            /* Use the pulSystemCallStack in thread mode. */
+            __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+            /* Start executing the system call upon returning from this handler. */
+            pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+            /* Raise a request to exit from the system call upon finishing the
+             * system call. */
+            pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+            /* Remember the location where we should copy the stack frame when we exit from
+             * the system call. */
+            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+            {
+                pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+            }
+            else
+            {
+                pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+            }
+
+            /* We ensure in pxPortInitialiseStack that the system call stack is
+             * double word aligned and therefore, there is no need of padding.
+             * Clear the bit[9] of stacked xPSR. */
+            pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+            /* Raise the privilege for the duration of the system call. */
+            __asm volatile
+            (
+                " mrs r0, control     \n" /* Obtain current control value. */
+                " movs r1, #1         \n" /* r1 = 1. */
+                " bics r0, r1         \n" /* Clear nPRIV bit. */
+                " msr control, r0     \n" /* Write back new control value. */
+                ::: "r0", "r1", "memory"
+            );
+        }
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /* Executed in place of a normal return when a system call
+     * implementation completes - vSystemCallEnter() plants this function's
+     * address in the stacked LR.  Raises the SVC that requests the switch
+     * back to the task stack and unprivileged mode (handled by
+     * vSystemCallExit()). */
+    void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+    {
+        __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    /* SVC handler helper used on system call exit (MPU wrappers v2) - the
+     * mirror of vSystemCallEnter().  Copies the stack frame back to the task
+     * stack saved at entry, restores PSPLIM, returns control to the caller
+     * of the system call entry point, and drops back to unprivileged mode. */
+    void vSystemCallExit( uint32_t * pulSystemCallStack,
+                          uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+    {
+        extern TaskHandle_t pxCurrentTCB;
+        xMPU_SETTINGS * pxMpuSettings;
+        uint32_t * pulTaskStack;
+        uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of
+             * being exported from linker scripts. */
+            extern uint32_t * __privileged_functions_start__;
+            extern uint32_t * __privileged_functions_end__;
+        #else
+            /* Declaration when these variables are exported from linker scripts. */
+            extern uint32_t __privileged_functions_start__[];
+            extern uint32_t __privileged_functions_end__[];
+        #endif /* #if defined( __ARMCC_VERSION ) */
+
+        ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+        /* Checks:
+         * 1. SVC is raised from the privileged code (i.e. application is not
+         *    raising SVC directly). This SVC is only raised from
+         *    vRequestSystemCallExit which is in the privileged code section.
+         * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+         *    this means that we previously entered a system call and the
+         *    application is not attempting to exit without entering a system
+         *    call.
+         */
+        if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+            ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+            ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+        {
+            pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+            #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+            {
+                if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+                {
+                    /* Extended frame i.e. FPU in use. */
+                    ulStackFrameSize = 26;
+                    __asm volatile
+                    (
+                        " vpush {s0}         \n" /* Trigger lazy stacking. */
+                        " vpop  {s0}         \n" /* Nullify the effect of the above instruction. */
+                        ::: "memory"
+                    );
+                }
+                else
+                {
+                    /* Standard frame i.e. FPU not in use. */
+                    ulStackFrameSize = 8;
+                }
+            }
+            #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+            {
+                ulStackFrameSize = 8;
+            }
+            #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+            /* Make space on the task stack for the stack frame. */
+            pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+            /* Copy the stack frame. */
+            for( i = 0; i < ulStackFrameSize; i++ )
+            {
+                pulTaskStack[ i ] = pulSystemCallStack[ i ];
+            }
+
+            /* Use the pulTaskStack in thread mode. */
+            __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+            /* Return to the caller of the System Call entry point (i.e. the
+             * caller of the MPU_<API>). */
+            pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+            /* Ensure that LR has a valid value.*/
+            pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+            /* Restore the PSPLIM register to what it was at the time of
+             * system call entry. */
+            __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+            /* If the hardware used padding to force the stack pointer
+             * to be double word aligned, set the stacked xPSR bit[9],
+             * otherwise clear it. */
+            if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+            }
+            else
+            {
+                pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+            }
+
+            /* This is not NULL only for the duration of the system call. */
+            pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+            /* Drop the privilege before returning to the thread mode. */
+            __asm volatile
+            (
+                " mrs r0, control     \n" /* Obtain current control value. */
+                " movs r1, #1         \n" /* r1 = 1. */
+                " orrs r0, r1         \n" /* Set nPRIV bit. */
+                " msr control, r0     \n" /* Write back new control value. */
+                ::: "r0", "r1", "memory"
+            );
+        }
+    }
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Return pdTRUE if the calling task runs privileged, pdFALSE otherwise.
+     * The answer is read from the privilege flag stored in the calling
+     * task's MPU settings. */
+    BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+    {
+        const xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+        return ( ( pxMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) ? pdTRUE : pdFALSE;
+    }
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /* Initialise the context of a task being created.  With the MPU enabled
+     * the full register context lives in the task's MPU settings
+     * (xMPUSettings->ulContext) rather than on the task stack; the returned
+     * pointer is one past the last context word written.
+     * NOTE(review): the order of the words written below is consumed by the
+     * port's assembly context save/restore code - do not reorder. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters,
+                                         BaseType_t xRunPrivileged,
+                                         xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulIndex = 0;
+
+        /* Callee-saved registers are given recognisable dummy values to aid
+         * debugging. */
+        xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+        ulIndex++;
+
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0 - the task's argument. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR - trap taken if the task function returns. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC - the task's entry point. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+        ulIndex++;
+
+        #if ( configENABLE_TRUSTZONE == 1 )
+        {
+            xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext - no secure context until one is allocated. */
+            ulIndex++;
+        }
+        #endif /* configENABLE_TRUSTZONE */
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+        ulIndex++;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+        ulIndex++;
+        /* The initial CONTROL register value (and hence the task's privilege
+         * level) depends on whether the task was created privileged. */
+        if( xRunPrivileged == pdTRUE )
+        {
+            xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        else
+        {
+            xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+            xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+            ulIndex++;
+        }
+        xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+        ulIndex++;
+
+        #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+        {
+            /* Ensure that the system call stack is double word aligned. */
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+                                                                                     ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+            xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+                                                                                           ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+                                                                                         ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+            /* This is not NULL only for the duration of a system call. */
+            xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+        }
+        #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+        return &( xMPUSettings->ulContext[ ulIndex ] );
+    }
+
+#else /* configENABLE_MPU */
+
+    /* Initialise a new task's stack so it looks exactly as if the task had
+     * already been interrupted - the first context restore then starts the
+     * task.  Returns the new top of stack. */
+    StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                         StackType_t * pxEndOfStack,
+                                         TaskFunction_t pxCode,
+                                         void * pvParameters ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Simulate the stack frame as it would be created by a context switch
+         * interrupt. */
+        #if ( portPRELOAD_REGISTERS == 0 )
+        {
+            pxTopOfStack--;                                          /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack -= 5;                                       /* Skip over the R12, R3, R2 and R1 slots, landing on R0. */
+            *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0. */
+            pxTopOfStack -= 9;                                       /* Skip over the R11..R4 slots, landing on EXC_RETURN. */
+            *pxTopOfStack = portINITIAL_EXC_RETURN;
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack;            /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT;               /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #else /* portPRELOAD_REGISTERS */
+        {
+            /* Here every register slot is pre-loaded with a recognisable
+             * dummy value to aid debugging. */
+            pxTopOfStack--;                                          /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
+            *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x12121212UL;            /* R12. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x03030303UL;            /* R3. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x02020202UL;            /* R2. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x01010101UL;            /* R1. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x11111111UL;            /* R11. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x10101010UL;            /* R10. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x09090909UL;            /* R09. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x08080808UL;            /* R08. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x07070707UL;            /* R07. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x06060606UL;            /* R06. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x05050505UL;            /* R05. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) 0x04040404UL;            /* R04. */
+            pxTopOfStack--;
+            *pxTopOfStack = portINITIAL_EXC_RETURN;                  /* EXC_RETURN. */
+            pxTopOfStack--;
+            *pxTopOfStack = ( StackType_t ) pxEndOfStack;            /* Slot used to hold this task's PSPLIM value. */
+
+            #if ( configENABLE_TRUSTZONE == 1 )
+            {
+                pxTopOfStack--;
+                *pxTopOfStack = portNO_SECURE_CONTEXT;               /* Slot used to hold this task's xSecureContext value. */
+            }
+            #endif /* configENABLE_TRUSTZONE */
+        }
+        #endif /* portPRELOAD_REGISTERS */
+
+        return pxTopOfStack;
+    }
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/*
+ * Start the FreeRTOS scheduler: validate the interrupt priority
+ * configuration (when configASSERT is defined and the core has BASEPRI),
+ * set the PendSV and SysTick priorities, optionally set up the MPU, start
+ * the tick timer and launch the first task.  Does not return on success;
+ * the trailing return statement only silences compiler warnings.
+ */
+BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+    {
+        volatile uint32_t ulOriginalPriority;
+        volatile uint32_t ulImplementedPrioBits = 0;
+        volatile uint8_t ucMaxPriorityValue;
+
+        /* Determine the maximum priority from which ISR safe FreeRTOS API
+         * functions can be called. ISR safe functions are those that end in
+         * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+         * ensure interrupt entry is as fast and simple as possible.
+         *
+         * Save the interrupt priority value that is about to be clobbered. */
+        ulOriginalPriority = portNVIC_SHPR2_REG;
+
+        /* Determine the number of priority bits available. First write to all
+         * possible bits. */
+        portNVIC_SHPR2_REG = 0xFF000000;
+
+        /* Read the value back to see how many bits stuck. */
+        ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
+
+        /* Use the same mask on the maximum system call priority. */
+        ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+        /* Check that the maximum system call priority is nonzero after
+         * accounting for the number of priority bits supported by the
+         * hardware. A priority of 0 is invalid because setting the BASEPRI
+         * register to 0 unmasks all interrupts, and interrupts with priority 0
+         * cannot be masked using BASEPRI.
+         * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+        configASSERT( ucMaxSysCallPriority );
+
+        /* Check that the bits not implemented in hardware are zero in
+         * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+        configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+        /* Calculate the maximum acceptable priority group value for the number
+         * of bits read back. */
+
+        /* Each stuck top bit is one implemented priority bit. */
+        while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+        {
+            ulImplementedPrioBits++;
+            ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+        }
+
+        if( ulImplementedPrioBits == 8 )
+        {
+            /* When the hardware implements 8 priority bits, there is no way for
+             * the software to configure PRIGROUP to not have sub-priorities. As
+             * a result, the least significant bit is always used for sub-priority
+             * and there are 128 preemption priorities and 2 sub-priorities.
+             *
+             * This may cause some confusion in some cases - for example, if
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+             * priority interrupts will be masked in Critical Sections as those
+             * are at the same preemption priority. This may appear confusing as
+             * 4 is higher (numerically lower) priority than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+             * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+             * to 4, this confusion does not happen and the behaviour remains the same.
+             *
+             * The following assert ensures that the sub-priority bit in the
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+             * confusion. */
+            configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+            ulMaxPRIGROUPValue = 0;
+        }
+        else
+        {
+            ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+        }
+
+        /* Shift the priority group value back to its position within the AIRCR
+         * register. */
+        ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+        ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+        /* Restore the clobbered interrupt priority register to its original
+         * value. */
+        portNVIC_SHPR2_REG = ulOriginalPriority;
+    }
+    #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+
+    /* Make PendSV and SysTick the same priority as the kernel. */
+    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
+    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
+
+    #if ( configENABLE_MPU == 1 )
+    {
+        /* Setup the Memory Protection Unit (MPU). */
+        prvSetupMPU();
+    }
+    #endif /* configENABLE_MPU */
+
+    /* Start the timer that generates the tick ISR. Interrupts are disabled
+     * here already. */
+    vPortSetupTimerInterrupt();
+
+    /* Initialize the critical nesting count ready for the first task. */
+    ulCriticalNesting = 0;
+
+    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+    {
+        /* From this point on, kernel object access checks consult the
+         * running task's access control list instead of being granted by
+         * default (see xPortIsAuthorizedToAccessKernelObject). */
+        xSchedulerRunning = pdTRUE;
+    }
+    #endif
+
+    /* Start the first task. */
+    vStartFirstTask();
+
+    /* Should never get here as the tasks will now be executing. Call the task
+     * exit error function to prevent compiler warnings about a static function
+     * not being called in the case that the application writer overrides this
+     * functionality by defining configTASK_RETURN_ADDRESS. Call
+     * vTaskSwitchContext() so link time optimization does not remove the
+     * symbol. */
+    vTaskSwitchContext();
+    prvTaskExitError();
+
+    /* Should not get here. */
+    return 0;
+}
+/*-----------------------------------------------------------*/
+
+/*
+ * Stopping the scheduler is not supported on this port - on Cortex-M there
+ * is no context to return to once tasks are running, so this deliberately
+ * fails an assert to flag the (invalid) call.
+ */
+void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
+{
+    /* Not implemented in ports where there is nothing to return to.
+     * Artificially force an assert. */
+    configASSERT( ulCriticalNesting == 1000UL );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /*
+     * Translate the generic region definitions in xRegions into ARMv8-M
+     * RBAR/RLAR values stored in xMPUSettings, and program MAIR0 attribute
+     * indices (attr0 = normal memory, attr1 = device memory).  When
+     * ulStackDepth > 0 (task creation) the task stack region is configured
+     * as well; otherwise the existing stack region settings are preserved
+     * and only the configurable regions are rewritten.
+     */
+    void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
+                                    const struct xMEMORY_REGION * const xRegions,
+                                    StackType_t * pxBottomOfStack,
+                                    uint32_t ulStackDepth )
+    {
+        uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber;
+        int32_t lIndex = 0;
+
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variable are defined in code instead of being
+             * exported from linker scripts. */
+            extern uint32_t * __privileged_sram_start__;
+            extern uint32_t * __privileged_sram_end__;
+        #else
+            /* Declaration when these variable are exported from linker scripts. */
+            extern uint32_t __privileged_sram_start__[];
+            extern uint32_t __privileged_sram_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+
+        /* Setup MAIR0. */
+        xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
+        xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
+
+        /* This function is called automatically when the task is created - in
+         * which case the stack region parameters will be valid. At all other
+         * times the stack parameters will not be valid and it is assumed that
+         * the stack region has already been configured. */
+        if( ulStackDepth > 0 )
+        {
+            ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+            ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1;
+
+            /* If the stack is within the privileged SRAM, do not protect it
+             * using a separate MPU region. This is needed because privileged
+             * SRAM is already protected using an MPU region and ARMv8-M does
+             * not allow overlapping MPU regions. */
+            if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) &&
+                ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) )
+            {
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0;
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0;
+            }
+            else
+            {
+                /* Define the region that allows access to the stack. */
+                ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) |
+                                                             ( portMPU_REGION_NON_SHAREABLE ) |
+                                                             ( portMPU_REGION_READ_WRITE ) |
+                                                             ( portMPU_REGION_EXECUTE_NEVER );
+
+                xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) |
+                                                             ( portMPU_RLAR_ATTR_INDEX0 ) |
+                                                             ( portMPU_RLAR_REGION_ENABLE );
+            }
+        }
+
+        /* User supplied configurable regions. */
+        for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ )
+        {
+            /* If xRegions is NULL i.e. the task has not specified any MPU
+             * region, the else part ensures that all the configurable MPU
+             * regions are invalidated. */
+            if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) )
+            {
+                /* Translate the generic region definition contained in xRegions
+                 * into the ARMv8 specific MPU settings that are then stored in
+                 * xMPUSettings. */
+                ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK;
+                ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1;
+                ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
+
+                /* Start address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) |
+                                                                          ( portMPU_REGION_NON_SHAREABLE );
+
+                /* RO/RW. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY );
+                }
+                else
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE );
+                }
+
+                /* XN. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER );
+                }
+
+                /* End Address. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) |
+                                                                          ( portMPU_RLAR_REGION_ENABLE );
+
+                /* Normal memory/ Device memory. */
+                if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
+                {
+                    /* Attr1 in MAIR0 is configured as device memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1;
+                }
+                else
+                {
+                    /* Attr0 in MAIR0 is configured as normal memory. */
+                    xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0;
+                }
+            }
+            else
+            {
+                /* Invalidate the region. */
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL;
+                xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL;
+            }
+
+            lIndex++;
+        }
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+    /*
+     * Return pdTRUE if the calling task may access the buffer
+     * [ pvBuffer, pvBuffer + ulBufferLength ) with the permissions in
+     * ulAccessRequested, pdFALSE otherwise.  Privileged tasks are always
+     * granted access; an unprivileged task is granted access only when the
+     * entire buffer lies within one of its enabled MPU regions whose
+     * permissions satisfy the request.
+     */
+    BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+                                                uint32_t ulBufferLength,
+                                                uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+    {
+        uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+        BaseType_t xAccessGranted = pdFALSE;
+        const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+        if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+        {
+            xAccessGranted = pdTRUE;
+        }
+        else
+        {
+            /* If computing the end address would overflow, deny access rather
+             * than wrapping around the address space. */
+            if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+            {
+                ulBufferStartAddress = ( uint32_t ) pvBuffer;
+                ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+                for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+                {
+                    /* Is the MPU region enabled? */
+                    if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+                    {
+                        if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+                            portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+                                                         portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+                                                         portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+                            portIS_AUTHORIZED( ulAccessRequested,
+                                               prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+                        {
+                            xAccessGranted = pdTRUE;
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+
+        return xAccessGranted;
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/*
+ * Return pdTRUE when called from Handler mode (i.e. from inside an
+ * interrupt or exception handler), pdFALSE when called from Thread mode.
+ */
+BaseType_t xPortIsInsideInterrupt( void )
+{
+    uint32_t ulIpsrValue;
+
+    /* The Interrupt Program Status Register (IPSR) reads as the exception
+     * number of the currently executing exception, or zero in Thread mode. */
+    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIpsrValue )::"memory" );
+
+    /* A non-zero IPSR means an exception handler is currently active. */
+    return ( ulIpsrValue == 0 ) ? pdFALSE : pdTRUE;
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
+
+    /*
+     * Assert that the currently executing interrupt has a priority at or
+     * below (numerically at or above) configMAX_SYSCALL_INTERRUPT_PRIORITY,
+     * and that the NVIC priority grouping dedicates all priority bits to
+     * pre-emption.  Typically invoked from the ...FromISR APIs when
+     * configASSERT is defined.
+     */
+    void vPortValidateInterruptPriority( void )
+    {
+        uint32_t ulCurrentInterrupt;
+        uint8_t ucCurrentPriority;
+
+        /* Obtain the number of the currently executing interrupt. */
+        __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
+
+        /* Is the interrupt number a user defined interrupt? */
+        if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
+        {
+            /* Look up the interrupt's priority. */
+            ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
+
+            /* The following assertion will fail if a service routine (ISR) for
+             * an interrupt that has been assigned a priority above
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+             * function. ISR safe FreeRTOS API functions must *only* be called
+             * from interrupts that have been assigned a priority at or below
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Numerically low interrupt priority numbers represent logically high
+             * interrupt priorities, therefore the priority of the interrupt must
+             * be set to a value equal to or numerically *higher* than
+             * configMAX_SYSCALL_INTERRUPT_PRIORITY.
+             *
+             * Interrupts that use the FreeRTOS API must not be left at their
+             * default priority of zero as that is the highest possible priority,
+             * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
+             * and therefore also guaranteed to be invalid.
+             *
+             * FreeRTOS maintains separate thread and ISR API functions to ensure
+             * interrupt entry is as fast and simple as possible.
+             *
+             * The following links provide detailed information:
+             * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+             * https://www.FreeRTOS.org/FAQHelp.html */
+            configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
+        }
+
+        /* Priority grouping: The interrupt controller (NVIC) allows the bits
+         * that define each interrupt's priority to be split between bits that
+         * define the interrupt's pre-emption priority bits and bits that define
+         * the interrupt's sub-priority. For simplicity all bits must be defined
+         * to be pre-emption priority bits. The following assertion will fail if
+         * this is not the case (if some bits represent a sub-priority).
+         *
+         * If the application only uses CMSIS libraries for interrupt
+         * configuration then the correct setting can be achieved on all Cortex-M
+         * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
+         * scheduler. Note however that some vendor specific peripheral libraries
+         * assume a non-zero priority group setting, in which cases using a value
+         * of zero will result in unpredictable behaviour. */
+        configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
+    }
+
+#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    /*
+     * Grant a task access to a kernel object by setting the corresponding
+     * bit in the task's access control list (ACL).
+     */
+    void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                         int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Locate the ACL word and the bit within it that represent the
+         * kernel object. */
+        const uint32_t ulEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject ) / portACL_ENTRY_SIZE_BITS;
+        const uint32_t ulEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject ) % portACL_ENTRY_SIZE_BITS;
+        xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+        /* Set the bit to grant access to the object. */
+        pxMpuSettings->ulAccessControlList[ ulEntryIndex ] |= ( 1U << ulEntryBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+    /*
+     * Revoke a task's access to a kernel object by clearing the
+     * corresponding bit in the task's access control list (ACL).
+     */
+    void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+                                          int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Locate the ACL word and the bit within it that represent the
+         * kernel object. */
+        const uint32_t ulEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject ) / portACL_ENTRY_SIZE_BITS;
+        const uint32_t ulEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject ) % portACL_ENTRY_SIZE_BITS;
+        xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+        /* Clear the bit to revoke access to the object. */
+        pxMpuSettings->ulAccessControlList[ ulEntryIndex ] &= ~( 1U << ulEntryBit );
+    }
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+        /*
+         * Return pdTRUE if the calling task may use the kernel object with
+         * the given internal index.  Access is always granted before the
+         * scheduler starts and to privileged tasks; otherwise the calling
+         * task's access control list is consulted.
+         */
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+            BaseType_t xAccessGranted = pdFALSE;
+            const xMPU_SETTINGS * xTaskMpuSettings;
+
+            if( xSchedulerRunning == pdFALSE )
+            {
+                /* Grant access to all the kernel objects before the scheduler
+                 * is started. It is necessary because there is no task running
+                 * yet and therefore, we cannot use the permissions of any
+                 * task. */
+                xAccessGranted = pdTRUE;
+            }
+            else
+            {
+                xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+                ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+                ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+                if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+                {
+                    xAccessGranted = pdTRUE;
+                }
+                else
+                {
+                    if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+                    {
+                        xAccessGranted = pdTRUE;
+                    }
+                }
+            }
+
+            return xAccessGranted;
+        }
+
+    #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+        /*
+         * ACL feature disabled: every task is authorized to access every
+         * kernel object.
+         */
+        BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+        {
+            ( void ) lInternalIndexOfKernelObject;
+
+            /* If Access Control List feature is not used, all the tasks have
+             * access to all the kernel objects. */
+            return pdTRUE;
+        }
+
+    #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.h b/Source/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.h
new file mode 100644
index 0000000..f64ceb5
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.h
@@ -0,0 +1,114 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __PORT_ASM_H__
+#define __PORT_ASM_H__
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+
+/* MPU wrappers includes. */
+#include "mpu_wrappers.h"
+
+/**
+ * @brief Restore the context of the first task so that the first task starts
+ * executing.
+ */
+void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Raises the privilege level by clearing the bit 0 of the CONTROL
+ * register.
+ *
+ * @note This is a privileged function and should only be called from the kernel
+ * code.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ *
+ * Bit 0 of the CONTROL register defines the privilege level of Thread Mode.
+ * Bit[0] = 0 --> The processor is running privileged
+ * Bit[0] = 1 --> The processor is running unprivileged.
+ */
+void vResetPrivilege( void ) __attribute__( ( naked ) );
+
+/**
+ * @brief Starts the first task.
+ */
+void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Disables interrupts.
+ */
+uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Enables interrupts.
+ */
+void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief PendSV Exception handler.
+ */
+void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief SVC Handler.
+ */
+void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Allocate a Secure context for the calling task.
+ *
+ * @param[in] ulSecureStackSize The size of the stack to be allocated on the
+ * secure side for the calling task.
+ */
+void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) );
+
+/**
+ * @brief Free the task's secure context.
+ *
+ * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task.
+ */
+void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
+
+#endif /* __PORT_ASM_H__ */
diff --git a/Source/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s b/Source/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s
new file mode 100644
index 0000000..00ee5a5
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s
@@ -0,0 +1,402 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+/* Including FreeRTOSConfig.h here will cause build errors if the header file
+contains code not understood by the assembler - for example the 'extern' keyword.
+To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so
+the code is included in C files but excluded by the preprocessor in assembly
+files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
+#include "FreeRTOSConfig.h"
+
+/* System call numbers includes. */
+#include "mpu_syscall_numbers.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+ EXTERN pxCurrentTCB
+ EXTERN vTaskSwitchContext
+ EXTERN vPortSVCHandler_C
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallExit
+#endif
+
+ PUBLIC xIsPrivileged
+ PUBLIC vResetPrivilege
+ PUBLIC vRestoreContextOfFirstTask
+ PUBLIC vRaisePrivilege
+ PUBLIC vStartFirstTask
+ PUBLIC ulSetInterruptMask
+ PUBLIC vClearInterruptMask
+ PUBLIC PendSV_Handler
+ PUBLIC SVC_Handler
+/*-----------------------------------------------------------*/
+
+/*---------------- Unprivileged Functions -------------------*/
+
+/*-----------------------------------------------------------*/
+
+ SECTION .text:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+/* Return 1 in r0 if Thread mode is privileged (CONTROL[0] clear), 0 otherwise. */
+xIsPrivileged:
+    mrs r0, control /* r0 = CONTROL. */
+    tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
+    ite ne
+    movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
+    moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
+    bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+/* Drop Thread mode to unprivileged by setting CONTROL[0]. */
+vResetPrivilege:
+    mrs r0, control /* r0 = CONTROL. */
+    orr r0, r0, #1 /* r0 = r0 | 1. */
+    msr control, r0 /* CONTROL = r0. */
+    bx lr /* Return to the caller. */
+/*-----------------------------------------------------------*/
+
+/*----------------- Privileged Functions --------------------*/
+
+/*-----------------------------------------------------------*/
+
+ SECTION privileged_functions:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+/* Restore the context of the first task from its TCB: program the per-task
+ * MPU regions, then restore the special and general registers before
+ * branching back to the task via the EXC_RETURN value restored into LR. */
+vRestoreContextOfFirstTask:
+    program_mpu_first_task:
+        ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+        ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+        dmb /* Complete outstanding transfers before disabling MPU. */
+        ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+        ldr r2, [r1] /* Read the value of MPU_CTRL. */
+        bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+        str r2, [r1] /* Disable MPU. */
+
+        adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+        ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+        ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+        str r1, [r2] /* Program MAIR0. */
+
+        adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+        ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+        ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+        movs r3, #4 /* r3 = 4. */
+        str r3, [r1] /* Program RNR = 4. */
+        ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+        #if ( configTOTAL_MPU_REGIONS == 16 )
+            movs r3, #8 /* r3 = 8. */
+            str r3, [r1] /* Program RNR = 8. */
+            ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+            stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+            movs r3, #12 /* r3 = 12. */
+            str r3, [r1] /* Program RNR = 12. */
+            ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+            stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+        #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+        ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+        ldr r2, [r1] /* Read the value of MPU_CTRL. */
+        orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+        str r2, [r1] /* Enable MPU. */
+        dsb /* Force memory writes before continuing. */
+
+    restore_context_first_task:
+        ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+        ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+        ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+    restore_special_regs_first_task:
+        ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+        msr psp, r2
+        msr psplim, r3
+        msr control, r4
+
+    restore_general_regs_first_task:
+        ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+        stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+        ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+
+    restore_context_done_first_task:
+        str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+        mov r0, #0
+        msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+        bx lr
+
+#else /* configENABLE_MPU */
+
+/* Restore the context of the first task (no MPU): pop PSPLIM and EXC_RETURN
+ * from the task stack, switch Thread mode to PSP and branch to the task. */
+vRestoreContextOfFirstTask:
+    ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+    ldr r1, [r2] /* Read pxCurrentTCB. */
+    ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+
+    ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+    msr psplim, r1 /* Set this task's PSPLIM value. */
+    movs r1, #2 /* r1 = 2. */
+    msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
+    adds r0, #32 /* Discard everything up to r0. */
+    msr psp, r0 /* This is now the new top of stack to use in the task. */
+    isb
+    mov r0, #0
+    msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+    bx r2 /* Finally, branch to EXC_RETURN. */
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/* Raise Thread mode to privileged by clearing CONTROL[0]. */
+vRaisePrivilege:
+    mrs r0, control /* Read the CONTROL register. */
+    bic r0, r0, #1 /* Clear the bit 0. */
+    msr control, r0 /* Write back the new CONTROL value. */
+    bx lr /* Return to the caller. */
+/*-----------------------------------------------------------*/
+
+/* Reset the MSP from the vector table, enable interrupts and issue the
+ * start-scheduler SVC.  Does not return - execution continues in the SVC
+ * handler, which launches the first task. */
+vStartFirstTask:
+    ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
+    ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
+    ldr r0, [r0] /* The first entry in vector table is stack pointer. */
+    msr msp, r0 /* Set the MSP back to the start of the stack. */
+    cpsie i /* Globally enable interrupts. */
+    cpsie f
+    dsb
+    isb
+    svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
+/*-----------------------------------------------------------*/
+
+/* Mask interrupts at or below configMAX_SYSCALL_INTERRUPT_PRIORITY and
+ * return the previous BASEPRI value in r0 so it can be restored later. */
+ulSetInterruptMask:
+    mrs r0, basepri /* r0 = basepri. Return original basepri value. */
+    mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+    msr basepri, r1 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+    dsb
+    isb
+    bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+/* Restore BASEPRI to the value previously returned by ulSetInterruptMask. */
+vClearInterruptMask:
+    msr basepri, r0 /* basepri = ulMask. */
+    dsb
+    isb
+    bx lr /* Return. */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */
+ mrs r2, psp /* r2 = PSP. */
+
+ save_general_regs:
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r1!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */
+ sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r1!, {r4-r11} /* Store r4-r11. */
+ ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r1!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psplim /* r3 = PSPLIM. */
+ mrs r4, control /* r4 = CONTROL. */
+ stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
+PendSV_Handler:
+ mrs r0, psp /* Read PSP in r0. */
+#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ it eq
+ vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
+#endif /* configENABLE_FPU || configENABLE_MVE */
+
+ mrs r2, psplim /* r2 = PSPLIM. */
+ mov r3, lr /* r3 = LR/EXC_RETURN. */
+ stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
+
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r2] /* Read pxCurrentTCB. */
+ str r0, [r1] /* Save the new top of stack in TCB. */
+
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r2] /* Read pxCurrentTCB. */
+ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
+
+ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
+
+#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ it eq
+ vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
+#endif /* configENABLE_FPU || configENABLE_MVE */
+
+ msr psplim, r2 /* Restore the PSPLIM register value for the task. */
+ msr psp, r0 /* Remember the new top of stack for the task. */
+ bx r3
+
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #NUM_SYSTEM_CALLS
+ blt syscall_enter
+ cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+ b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+ END
diff --git a/Source/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h b/Source/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h
new file mode 100644
index 0000000..ee5baf1
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h
@@ -0,0 +1,85 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_MVE
+ #error configENABLE_MVE must be defined in FreeRTOSConfig.h. Set configENABLE_MVE to 1 to enable the MVE or 0 to disable the MVE.
+#endif /* configENABLE_MVE */
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portARCH_NAME "Cortex-M85"
+#define portHAS_BASEPRI 1
+#define portDONT_DISCARD __root
+/*-----------------------------------------------------------*/
+
+/* ARMv8-M common port configurations. */
+#include "portmacrocommon.h"
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portDISABLE_INTERRUPTS() ulSetInterruptMask()
+#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 )
+/*-----------------------------------------------------------*/
+
+/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in
+ * the source code because to do so would cause other compilers to generate
+ * warnings. */
+#pragma diag_suppress=Be006
+#pragma diag_suppress=Pa082
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACRO_H */
diff --git a/Source/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/Source/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h
new file mode 100644
index 0000000..6f666da
--- /dev/null
+++ b/Source/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h
@@ -0,0 +1,449 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACROCOMMON_H
+#define PORTMACROCOMMON_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*------------------------------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *------------------------------------------------------------------------------
+ */
+
+#ifndef configENABLE_FPU
+ #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU.
+#endif /* configENABLE_FPU */
+
+#ifndef configENABLE_MPU
+ #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
+#endif /* configENABLE_MPU */
+
+#ifndef configENABLE_TRUSTZONE
+ #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone.
+#endif /* configENABLE_TRUSTZONE */
+
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Type definitions.
+ */
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
+
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
+
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+
+/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
+ * not need to be guarded with a critical section. */
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * Architecture specifics.
+ */
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+#define portNOP()
+#define portINLINE __inline
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE inline __attribute__( ( always_inline ) )
+#endif
+#define portHAS_STACK_OVERFLOW_CHECKING 1
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Extern declarations.
+ */
+extern BaseType_t xPortIsInsideInterrupt( void );
+
+extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */;
+
+extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */;
+extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */;
+
+extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
+
+#if ( configENABLE_TRUSTZONE == 1 )
+ extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */
+ extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */;
+#endif /* configENABLE_TRUSTZONE */
+
+#if ( configENABLE_MPU == 1 )
+ extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
+ extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief MPU specific constants.
+ */
+#if ( configENABLE_MPU == 1 )
+ #define portUSING_MPU_WRAPPERS 1
+ #define portPRIVILEGE_BIT ( 0x80000000UL )
+#else
+ #define portPRIVILEGE_BIT ( 0x0UL )
+#endif /* configENABLE_MPU */
+
+/* MPU settings that can be overridden in FreeRTOSConfig.h. */
+#ifndef configTOTAL_MPU_REGIONS
+ /* Define to 8 for backward compatibility. */
+ #define configTOTAL_MPU_REGIONS ( 8UL )
+#endif
+
+/* MPU regions. */
+#define portPRIVILEGED_FLASH_REGION ( 0UL )
+#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
+#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL )
+#define portPRIVILEGED_RAM_REGION ( 3UL )
+#define portSTACK_REGION ( 4UL )
+#define portFIRST_CONFIGURABLE_REGION ( 5UL )
+#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL )
+#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
+#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
+
+/* Device memory attributes used in MPU_MAIR registers.
+ *
+ * 8-bit values encoded as follows:
+ * Bit[7:4] - 0000 - Device Memory
+ * Bit[3:2] - 00 --> Device-nGnRnE
+ * 01 --> Device-nGnRE
+ * 10 --> Device-nGRE
+ * 11 --> Device-GRE
+ * Bit[1:0] - 00, Reserved.
+ */
+#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
+#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
+#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
+#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
+
+/* Normal memory attributes used in MPU_MAIR registers. */
+#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
+#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
+
+/* Attributes used in MPU_RBAR registers. */
+#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
+#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
+#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
+
+#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
+#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
+#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
+#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
+
+#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+ #define portACL_ENTRY_SIZE_BITS ( 32U )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Validate priority of ISRs that are allowed to call FreeRTOS
+ * system calls.
+ */
+#ifdef configASSERT
+ #if ( portHAS_BASEPRI == 1 )
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+ #endif
+#endif
+
+/**
+ * @brief SVC numbers.
+ */
+#define portSVC_ALLOCATE_SECURE_CONTEXT 100
+#define portSVC_FREE_SECURE_CONTEXT 101
+#define portSVC_START_SCHEDULER 102
+#define portSVC_RAISE_PRIVILEGE 103
+#define portSVC_SYSTEM_CALL_EXIT 104
+#define portSVC_YIELD 105
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Scheduler utilities.
+ */
+#define portYIELD() vPortYield()
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Critical section management.
+ */
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x )
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Tickless idle/low power functionality.
+ */
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Task function macros as described on the FreeRTOS.org WEB site.
+ */
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_TRUSTZONE == 1 )
+
+/**
+ * @brief Allocate a secure context for the task.
+ *
+ * Tasks are not created with a secure context. Any task that is going to call
+ * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
+ * secure context before it calls any secure function.
+ *
+ * @param[in] ulSecureStackSize The size of the secure stack to be allocated.
+ */
+ #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
+
+/**
+ * @brief Called when a task is deleted to delete the task's secure context,
+ * if it has one.
+ *
+ * @param[in] pxTCB The TCB of the task being deleted.
+ */
+ #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
+#endif /* configENABLE_TRUSTZONE */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Checks whether or not the processor is privileged.
+ *
+ * @return 1 if the processor is already privileged, 0 otherwise.
+ */
+ #define portIS_PRIVILEGED() xIsPrivileged()
+
+/**
+ * @brief Raise an SVC request to raise privilege.
+ *
+ * The SVC handler checks that the SVC was raised from a system call and only
+ * then it raises the privilege. If this is called from any other place,
+ * the privilege is not raised.
+ */
+ #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
+
+/**
+ * @brief Lowers the privilege level by setting the bit 0 of the CONTROL
+ * register.
+ */
+ #define portRESET_PRIVILEGE() vResetPrivilege()
+#else
+ #define portIS_PRIVILEGED()
+ #define portRAISE_PRIVILEGE()
+ #define portRESET_PRIVILEGE()
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Barriers.
+ */
+#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+/*-----------------------------------------------------------*/
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACROCOMMON_H */
diff --git a/Source/portable/Keil/See-also-the-RVDS-directory.txt b/Source/portable/Keil/See-also-the-RVDS-directory.txt
index bd7fab7..944c593 100644
--- a/Source/portable/Keil/See-also-the-RVDS-directory.txt
+++ b/Source/portable/Keil/See-also-the-RVDS-directory.txt
@@ -1 +1 @@
-Nothing to see here.
\ No newline at end of file
+Nothing to see here.
diff --git a/Source/portable/MemMang/heap_1.c b/Source/portable/MemMang/heap_1.c
index 43a55f9..da11bfc 100644
--- a/Source/portable/MemMang/heap_1.c
+++ b/Source/portable/MemMang/heap_1.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/MemMang/heap_2.c b/Source/portable/MemMang/heap_2.c
index 497295d..9f363f1 100644
--- a/Source/portable/MemMang/heap_2.c
+++ b/Source/portable/MemMang/heap_2.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/MemMang/heap_3.c b/Source/portable/MemMang/heap_3.c
index 89c011a..f0ecc96 100644
--- a/Source/portable/MemMang/heap_3.c
+++ b/Source/portable/MemMang/heap_3.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/MemMang/heap_4.c b/Source/portable/MemMang/heap_4.c
index 7748f50..013364f 100644
--- a/Source/portable/MemMang/heap_4.c
+++ b/Source/portable/MemMang/heap_4.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -96,8 +96,8 @@
* of their memory address. */
typedef struct A_BLOCK_LINK
{
- struct A_BLOCK_LINK * pxNextFreeBlock; /*<< The next free block in the list. */
- size_t xBlockSize; /*<< The size of the free block. */
+ struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */
+ size_t xBlockSize; /**< The size of the free block. */
} BlockLink_t;
/*-----------------------------------------------------------*/
@@ -159,13 +159,31 @@
if( xWantedSize > 0 )
{
/* The wanted size must be increased so it can contain a BlockLink_t
- * structure in addition to the requested amount of bytes. Some
- * additional increment may also be needed for alignment. */
- xAdditionalRequiredSize = xHeapStructSize + portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK );
-
- if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 )
+ * structure in addition to the requested amount of bytes. */
+ if( heapADD_WILL_OVERFLOW( xWantedSize, xHeapStructSize ) == 0 )
{
- xWantedSize += xAdditionalRequiredSize;
+ xWantedSize += xHeapStructSize;
+
+ /* Ensure that blocks are always aligned to the required number
+ * of bytes. */
+ if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
+ {
+ /* Byte alignment required. */
+ xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK );
+
+ if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 )
+ {
+ xWantedSize += xAdditionalRequiredSize;
+ }
+ else
+ {
+ xWantedSize = 0;
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
}
else
{
@@ -390,7 +408,7 @@
{
uxAddress += ( portBYTE_ALIGNMENT - 1 );
uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK );
- xTotalHeapSize -= uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap;
+ xTotalHeapSize -= ( size_t ) ( uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap );
}
pucAlignedHeap = ( uint8_t * ) uxAddress;
@@ -402,7 +420,7 @@
/* pxEnd is used to mark the end of the list of free blocks and is inserted
* at the end of the heap space. */
- uxAddress = ( ( portPOINTER_SIZE_TYPE ) pucAlignedHeap ) + xTotalHeapSize;
+ uxAddress = ( portPOINTER_SIZE_TYPE ) ( pucAlignedHeap + xTotalHeapSize );
uxAddress -= xHeapStructSize;
uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK );
pxEnd = ( BlockLink_t * ) uxAddress;
diff --git a/Source/portable/MemMang/heap_5.c b/Source/portable/MemMang/heap_5.c
index 5c95925..79b3a8a 100644
--- a/Source/portable/MemMang/heap_5.c
+++ b/Source/portable/MemMang/heap_5.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -170,13 +170,31 @@
if( xWantedSize > 0 )
{
/* The wanted size must be increased so it can contain a BlockLink_t
- * structure in addition to the requested amount of bytes. Some
- * additional increment may also be needed for alignment. */
- xAdditionalRequiredSize = xHeapStructSize + portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK );
-
- if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 )
+ * structure in addition to the requested amount of bytes. */
+ if( heapADD_WILL_OVERFLOW( xWantedSize, xHeapStructSize ) == 0 )
{
- xWantedSize += xAdditionalRequiredSize;
+ xWantedSize += xHeapStructSize;
+
+ /* Ensure that blocks are always aligned to the required number
+ * of bytes. */
+ if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
+ {
+ /* Byte alignment required. */
+ xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK );
+
+ if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 )
+ {
+ xWantedSize += xAdditionalRequiredSize;
+ }
+ else
+ {
+ xWantedSize = 0;
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
}
else
{
diff --git a/Source/portable/RVDS/ARM_CA9/port.c b/Source/portable/RVDS/ARM_CA9/port.c
new file mode 100644
index 0000000..d418131
--- /dev/null
+++ b/Source/portable/RVDS/ARM_CA9/port.c
@@ -0,0 +1,477 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdlib.h>
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+#ifndef configINTERRUPT_CONTROLLER_BASE_ADDRESS
+ #error configINTERRUPT_CONTROLLER_BASE_ADDRESS must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif
+
+#ifndef configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET
+ #error configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif
+
+#ifndef configUNIQUE_INTERRUPT_PRIORITIES
+ #error configUNIQUE_INTERRUPT_PRIORITIES must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif
+
+#ifndef configSETUP_TICK_INTERRUPT
+ #error configSETUP_TICK_INTERRUPT() must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif /* configSETUP_TICK_INTERRUPT */
+
+#ifndef configMAX_API_CALL_INTERRUPT_PRIORITY
+ #error configMAX_API_CALL_INTERRUPT_PRIORITY must be defined. See https://www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif
+
+#if configMAX_API_CALL_INTERRUPT_PRIORITY == 0
+ #error configMAX_API_CALL_INTERRUPT_PRIORITY must not be set to 0
+#endif
+
+#if configMAX_API_CALL_INTERRUPT_PRIORITY > configUNIQUE_INTERRUPT_PRIORITIES
+ #error configMAX_API_CALL_INTERRUPT_PRIORITY must be less than or equal to configUNIQUE_INTERRUPT_PRIORITIES as the lower the numeric priority value the higher the logical interrupt priority
+#endif
+
+#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
+ /* Check the configuration. */
+ #if( configMAX_PRIORITIES > 32 )
+ #error configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32. It is very rare that a system requires more than 10 to 15 difference priorities as tasks that share a priority will time slice.
+ #endif
+#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
+
+/* In case security extensions are implemented. */
+#if configMAX_API_CALL_INTERRUPT_PRIORITY <= ( configUNIQUE_INTERRUPT_PRIORITIES / 2 )
+ #error configMAX_API_CALL_INTERRUPT_PRIORITY must be greater than ( configUNIQUE_INTERRUPT_PRIORITIES / 2 )
+#endif
+
+#ifndef configCLEAR_TICK_INTERRUPT
+ #define configCLEAR_TICK_INTERRUPT()
+#endif
+
+/* The number of bits to shift for an interrupt priority is dependent on the
+number of bits implemented by the interrupt controller. */
+#if configUNIQUE_INTERRUPT_PRIORITIES == 16
+ #define portPRIORITY_SHIFT 4
+ #define portMAX_BINARY_POINT_VALUE 3
+#elif configUNIQUE_INTERRUPT_PRIORITIES == 32
+ #define portPRIORITY_SHIFT 3
+ #define portMAX_BINARY_POINT_VALUE 2
+#elif configUNIQUE_INTERRUPT_PRIORITIES == 64
+ #define portPRIORITY_SHIFT 2
+ #define portMAX_BINARY_POINT_VALUE 1
+#elif configUNIQUE_INTERRUPT_PRIORITIES == 128
+ #define portPRIORITY_SHIFT 1
+ #define portMAX_BINARY_POINT_VALUE 0
+#elif configUNIQUE_INTERRUPT_PRIORITIES == 256
+ #define portPRIORITY_SHIFT 0
+ #define portMAX_BINARY_POINT_VALUE 0
+#else
+ #error Invalid configUNIQUE_INTERRUPT_PRIORITIES setting. configUNIQUE_INTERRUPT_PRIORITIES must be set to the number of unique priorities implemented by the target hardware
+#endif
+
+/* A critical section is exited when the critical section nesting count reaches
+this value. */
+#define portNO_CRITICAL_NESTING ( ( uint32_t ) 0 )
+
+/* In all GICs 255 can be written to the priority mask register to unmask all
+(but the lowest) interrupt priority. */
+#define portUNMASK_VALUE ( 0xFFUL )
+
+/* Tasks are not created with a floating point context, but can be given a
+floating point context after they have been created. A variable is stored as
+part of the tasks context that holds portNO_FLOATING_POINT_CONTEXT if the task
+does not have an FPU context, or any other value if the task does have an FPU
+context. */
+#define portNO_FLOATING_POINT_CONTEXT ( ( StackType_t ) 0 )
+
+/* Interrupt controller access addresses. */
+#define portICCPMR_PRIORITY_MASK_OFFSET ( 0x04 )
+#define portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET ( 0x0C )
+#define portICCEOIR_END_OF_INTERRUPT_OFFSET ( 0x10 )
+#define portICCBPR_BINARY_POINT_OFFSET ( 0x08 )
+#define portICCRPR_RUNNING_PRIORITY_OFFSET ( 0x14 )
+#define portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET )
+#define portICCPMR_PRIORITY_MASK_REGISTER ( *( ( volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET ) ) )
+#define portICCIAR_INTERRUPT_ACKNOWLEDGE_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET )
+#define portICCEOIR_END_OF_INTERRUPT_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCEOIR_END_OF_INTERRUPT_OFFSET )
+#define portICCPMR_PRIORITY_MASK_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET )
+#define portICCBPR_BINARY_POINT_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCBPR_BINARY_POINT_OFFSET ) ) )
+#define portICCRPR_RUNNING_PRIORITY_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCRPR_RUNNING_PRIORITY_OFFSET ) ) )
+
+/* Used by portASSERT_IF_INTERRUPT_PRIORITY_INVALID() when ensuring the binary
+point is zero. */
+#define portBINARY_POINT_BITS ( ( uint8_t ) 0x03 )
+
+/* Constants required to setup the initial task context. */
+#define portINITIAL_SPSR ( ( StackType_t ) 0x1f ) /* System mode, ARM mode, interrupts enabled. */
+#define portTHUMB_MODE_BIT ( ( StackType_t ) 0x20 )
+#define portTHUMB_MODE_ADDRESS ( 0x01UL )
+
+/* Masks all bits in the APSR other than the mode bits. */
+#define portAPSR_MODE_BITS_MASK ( 0x1F )
+
+/* The value of the mode bits in the APSR when the CPU is executing in user
+mode. */
+#define portAPSR_USER_MODE ( 0x10 )
+
+/* Macro to unmask all interrupt priorities. */
+#define portCLEAR_INTERRUPT_MASK() \
+{ \
+ __disable_irq(); \
+ portICCPMR_PRIORITY_MASK_REGISTER = portUNMASK_VALUE; \
+ __asm( "DSB \n" \
+ "ISB \n" ); \
+ __enable_irq(); \
+}
+
+/*-----------------------------------------------------------*/
+
+/*
+ * Starts the first task executing. This function is necessarily written in
+ * assembly code so is implemented in portASM.s.
+ */
+extern void vPortRestoreTaskContext( void );
+
+/*
+ * Used to catch tasks that attempt to return from their implementing function.
+ */
+static void prvTaskExitError( void );
+
+/*-----------------------------------------------------------*/
+
+/* A variable is used to keep track of the critical section nesting. This
+variable has to be stored as part of the task context and must be initialised to
+a non zero value to ensure interrupts don't inadvertently become unmasked before
+the scheduler starts. As it is stored as part of the task context it will
+automatically be set to 0 when the first task is started. */
+volatile uint32_t ulCriticalNesting = 9999UL;
+
+/* Used to pass constants into the ASM code. The address at which variables are
+placed is the constant value so indirect loads in the asm code are not
+required. */
+uint32_t ulICCIAR __attribute__( ( at( portICCIAR_INTERRUPT_ACKNOWLEDGE_REGISTER_ADDRESS ) ) );
+uint32_t ulICCEOIR __attribute__( ( at( portICCEOIR_END_OF_INTERRUPT_REGISTER_ADDRESS ) ) );
+uint32_t ulICCPMR __attribute__( ( at( portICCPMR_PRIORITY_MASK_REGISTER_ADDRESS ) ) );
+uint32_t ulAsmAPIPriorityMask __attribute__( ( at( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) ) );
+
+/* Saved as part of the task context. If ulPortTaskHasFPUContext is non-zero then
+a floating point context must be saved and restored for the task. */
+uint32_t ulPortTaskHasFPUContext = pdFALSE;
+
+/* Set to 1 to pend a context switch from an ISR. */
+uint32_t ulPortYieldRequired = pdFALSE;
+
+/* Counts the interrupt nesting depth. A context switch is only performed if
+the nesting depth is 0. */
+uint32_t ulPortInterruptNesting = 0UL;
+
+/*-----------------------------------------------------------*/
+
+/*
+ * See header file for description.
+ */
+StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
+{
+ /* Setup the initial stack of the task. The stack is set exactly as
+ expected by the portRESTORE_CONTEXT() macro.
+
+    The first real value on the stack is the status register, which is set for
+ system mode, with interrupts enabled. A few NULLs are added first to ensure
+ GDB does not try decoding a non-existent return address. */
+ *pxTopOfStack = NULL;
+ pxTopOfStack--;
+ *pxTopOfStack = NULL;
+ pxTopOfStack--;
+ *pxTopOfStack = NULL;
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) portINITIAL_SPSR;
+
+ if( ( ( uint32_t ) pxCode & portTHUMB_MODE_ADDRESS ) != 0x00UL )
+ {
+ /* The task will start in THUMB mode. */
+ *pxTopOfStack |= portTHUMB_MODE_BIT;
+ }
+
+ pxTopOfStack--;
+
+ /* Next the return address, which in this case is the start of the task. */
+ *pxTopOfStack = ( StackType_t ) pxCode;
+ pxTopOfStack--;
+
+ /* Next all the registers other than the stack pointer. */
+ *pxTopOfStack = ( StackType_t ) prvTaskExitError; /* R14 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x12121212; /* R12 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x11111111; /* R11 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x10101010; /* R10 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x09090909; /* R9 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x08080808; /* R8 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x07070707; /* R7 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x06060606; /* R6 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x05050505; /* R5 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x04040404; /* R4 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x03030303; /* R3 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x02020202; /* R2 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) 0x01010101; /* R1 */
+ pxTopOfStack--;
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ pxTopOfStack--;
+
+ /* The task will start with a critical nesting count of 0 as interrupts are
+ enabled. */
+ *pxTopOfStack = portNO_CRITICAL_NESTING;
+ pxTopOfStack--;
+
+ /* The task will start without a floating point context. A task that uses
+ the floating point hardware must call vPortTaskUsesFPU() before executing
+ any floating point instructions. */
+ *pxTopOfStack = portNO_FLOATING_POINT_CONTEXT;
+
+ return pxTopOfStack;
+}
+/*-----------------------------------------------------------*/
+
+static void prvTaskExitError( void )
+{
+ /* A function that implements a task must not exit or attempt to return to
+ its caller as there is nothing to return to. If a task wants to exit it
+ should instead call vTaskDelete( NULL ).
+
+ Artificially force an assert() to be triggered if configASSERT() is
+ defined, then stop here so application writers can catch the error. */
+ configASSERT( ulPortInterruptNesting == ~0UL );
+ portDISABLE_INTERRUPTS();
+ for( ;; );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortStartScheduler( void )
+{
+uint32_t ulAPSR;
+
+ /* Only continue if the CPU is not in User mode. The CPU must be in a
+ Privileged mode for the scheduler to start. */
+ __asm( "MRS ulAPSR, APSR" );
+ ulAPSR &= portAPSR_MODE_BITS_MASK;
+ configASSERT( ulAPSR != portAPSR_USER_MODE );
+
+ if( ulAPSR != portAPSR_USER_MODE )
+ {
+ /* Only continue if the binary point value is set to its lowest possible
+ setting. See the comments in vPortValidateInterruptPriority() below for
+ more information. */
+ configASSERT( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE );
+
+ if( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE )
+ {
+ /* Start the timer that generates the tick ISR. */
+ configSETUP_TICK_INTERRUPT();
+
+ __enable_irq();
+ vPortRestoreTaskContext();
+ }
+ }
+
+ /* Will only get here if vTaskStartScheduler() was called with the CPU in
+ a non-privileged mode or the binary point register was not set to its lowest
+ possible value. */
+ return 0;
+}
+/*-----------------------------------------------------------*/
+
+void vPortEndScheduler( void )
+{
+ /* Not implemented in ports where there is nothing to return to.
+ Artificially force an assert. */
+ configASSERT( ulCriticalNesting == 1000UL );
+}
+/*-----------------------------------------------------------*/
+
+void vPortEnterCritical( void )
+{
+ /* Disable interrupts as per portDISABLE_INTERRUPTS(); */
+ ulPortSetInterruptMask();
+
+ /* Now interrupts are disabled ulCriticalNesting can be accessed
+ directly. Increment ulCriticalNesting to keep a count of how many times
+ portENTER_CRITICAL() has been called. */
+ ulCriticalNesting++;
+
+ /* This is not the interrupt safe version of the enter critical function so
+ assert() if it is being called from an interrupt context. Only API
+ functions that end in "FromISR" can be used in an interrupt. Only assert if
+ the critical nesting count is 1 to protect against recursive calls if the
+ assert function also uses a critical section. */
+ if( ulCriticalNesting == 1 )
+ {
+ configASSERT( ulPortInterruptNesting == 0 );
+ }
+}
+/*-----------------------------------------------------------*/
+
+void vPortExitCritical( void )
+{
+ if( ulCriticalNesting > portNO_CRITICAL_NESTING )
+ {
+ /* Decrement the nesting count as the critical section is being
+ exited. */
+ ulCriticalNesting--;
+
+ /* If the nesting level has reached zero then all interrupt
+ priorities must be re-enabled. */
+ if( ulCriticalNesting == portNO_CRITICAL_NESTING )
+ {
+ /* Critical nesting has reached zero so all interrupt priorities
+ should be unmasked. */
+ portCLEAR_INTERRUPT_MASK();
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+void FreeRTOS_Tick_Handler( void )
+{
+ /* Set interrupt mask before altering scheduler structures. The tick
+ handler runs at the lowest priority, so interrupts cannot already be masked,
+ so there is no need to save and restore the current mask value. */
+ __disable_irq();
+ portICCPMR_PRIORITY_MASK_REGISTER = ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
+ __asm( "DSB \n"
+ "ISB \n" );
+ __enable_irq();
+
+ /* Increment the RTOS tick. */
+ if( xTaskIncrementTick() != pdFALSE )
+ {
+ ulPortYieldRequired = pdTRUE;
+ }
+
+ /* Ensure all interrupt priorities are active again. */
+ portCLEAR_INTERRUPT_MASK();
+ configCLEAR_TICK_INTERRUPT();
+}
+/*-----------------------------------------------------------*/
+
+void vPortTaskUsesFPU( void )
+{
+uint32_t ulInitialFPSCR = 0;
+
+ /* A task is registering the fact that it needs an FPU context. Set the
+ FPU flag (which is saved as part of the task context). */
+ ulPortTaskHasFPUContext = pdTRUE;
+
+ /* Initialise the floating point status register. */
+ __asm( "FMXR FPSCR, ulInitialFPSCR" );
+}
+/*-----------------------------------------------------------*/
+
+void vPortClearInterruptMask( uint32_t ulNewMaskValue )
+{
+ if( ulNewMaskValue == pdFALSE )
+ {
+ portCLEAR_INTERRUPT_MASK();
+ }
+}
+/*-----------------------------------------------------------*/
+
+uint32_t ulPortSetInterruptMask( void )
+{
+uint32_t ulReturn;
+
+ __disable_irq();
+ if( portICCPMR_PRIORITY_MASK_REGISTER == ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) )
+ {
+ /* Interrupts were already masked. */
+ ulReturn = pdTRUE;
+ }
+ else
+ {
+ ulReturn = pdFALSE;
+ portICCPMR_PRIORITY_MASK_REGISTER = ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
+ __asm( "DSB \n"
+ "ISB \n" );
+ }
+ __enable_irq();
+
+ return ulReturn;
+}
+/*-----------------------------------------------------------*/
+
+#if( configASSERT_DEFINED == 1 )
+
+ void vPortValidateInterruptPriority( void )
+ {
+ /* The following assertion will fail if a service routine (ISR) for
+ an interrupt that has been assigned a priority above
+ configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
+ function. ISR safe FreeRTOS API functions must *only* be called
+ from interrupts that have been assigned a priority at or below
+ configMAX_SYSCALL_INTERRUPT_PRIORITY.
+
+ Numerically low interrupt priority numbers represent logically high
+ interrupt priorities, therefore the priority of the interrupt must
+ be set to a value equal to or numerically *higher* than
+ configMAX_SYSCALL_INTERRUPT_PRIORITY.
+
+ FreeRTOS maintains separate thread and ISR API functions to ensure
+ interrupt entry is as fast and simple as possible.
+
+ The following links provide detailed information:
+ https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
+ https://www.FreeRTOS.org/FAQHelp.html */
+ configASSERT( portICCRPR_RUNNING_PRIORITY_REGISTER >= ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) );
+
+ /* Priority grouping: The interrupt controller (GIC) allows the bits
+ that define each interrupt's priority to be split between bits that
+ define the interrupt's pre-emption priority bits and bits that define
+ the interrupt's sub-priority. For simplicity all bits must be defined
+ to be pre-emption priority bits. The following assertion will fail if
+ this is not the case (if some bits represent a sub-priority).
+
+ The priority grouping is configured by the GIC's binary point register
+    (ICCBPR). Writing 0 to ICCBPR will ensure it is set to its lowest
+ possible value (which may be above 0). */
+ configASSERT( portICCBPR_BINARY_POINT_REGISTER <= portMAX_BINARY_POINT_VALUE );
+ }
+
+#endif /* configASSERT_DEFINED */
diff --git a/Source/portable/RVDS/ARM_CA9/portASM.s b/Source/portable/RVDS/ARM_CA9/portASM.s
new file mode 100644
index 0000000..bd36f2e
--- /dev/null
+++ b/Source/portable/RVDS/ARM_CA9/portASM.s
@@ -0,0 +1,171 @@
+;/*
+; * FreeRTOS Kernel V10.6.2
+; * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+; *
+; * SPDX-License-Identifier: MIT
+; *
+; * Permission is hereby granted, free of charge, to any person obtaining a copy of
+; * this software and associated documentation files (the "Software"), to deal in
+; * the Software without restriction, including without limitation the rights to
+; * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+; * the Software, and to permit persons to whom the Software is furnished to do so,
+; * subject to the following conditions:
+; *
+; * The above copyright notice and this permission notice shall be included in all
+; * copies or substantial portions of the Software.
+; *
+; * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+; * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+; * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+; * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+; * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+; * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+; *
+; * https://www.FreeRTOS.org
+; * https://github.com/FreeRTOS
+; *
+; */
+
+ INCLUDE portmacro.inc
+
+ IMPORT vApplicationIRQHandler
+ IMPORT vTaskSwitchContext
+ IMPORT ulPortYieldRequired
+ IMPORT ulPortInterruptNesting
+ IMPORT vTaskSwitchContext
+ IMPORT ulICCIAR
+ IMPORT ulICCEOIR
+
+ EXPORT FreeRTOS_SWI_Handler
+ EXPORT FreeRTOS_IRQ_Handler
+ EXPORT vPortRestoreTaskContext
+
+ ARM
+ AREA PORT_ASM, CODE, READONLY
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; SVC handler is used to yield a task.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+FreeRTOS_SWI_Handler
+
+ PRESERVE8
+
+ ; Save the context of the current task and select a new task to run.
+ portSAVE_CONTEXT
+ LDR R0, =vTaskSwitchContext
+ BLX R0
+ portRESTORE_CONTEXT
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; vPortRestoreTaskContext is used to start the scheduler.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+vPortRestoreTaskContext
+ ; Switch to system mode
+ CPS #SYS_MODE
+ portRESTORE_CONTEXT
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; PL390 GIC interrupt handler
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+FreeRTOS_IRQ_Handler
+
+ ; Return to the interrupted instruction.
+ SUB lr, lr, #4
+
+ ; Push the return address and SPSR
+ PUSH {lr}
+ MRS lr, SPSR
+ PUSH {lr}
+
+ ; Change to supervisor mode to allow reentry.
+ CPS #SVC_MODE
+
+ ; Push used registers.
+ PUSH {r0-r4, r12}
+
+ ; Increment nesting count. r3 holds the address of ulPortInterruptNesting
+ ; for future use. r1 holds the original ulPortInterruptNesting value for
+ ; future use.
+ LDR r3, =ulPortInterruptNesting
+ LDR r1, [r3]
+ ADD r4, r1, #1
+ STR r4, [r3]
+
+ ; Read value from the interrupt acknowledge register, which is stored in r0
+ ; for future parameter and interrupt clearing use.
+ LDR r2, =ulICCIAR
+ LDR r0, [r2]
+
+ ; Ensure bit 2 of the stack pointer is clear. r2 holds the bit 2 value for
+ ; future use. _RB_ Does this ever actually need to be done provided the
+ ; start of the stack is 8-byte aligned?
+ MOV r2, sp
+ AND r2, r2, #4
+ SUB sp, sp, r2
+
+ ; Call the interrupt handler. r4 is pushed to maintain alignment.
+ PUSH {r0-r4, lr}
+ LDR r1, =vApplicationIRQHandler
+ BLX r1
+ POP {r0-r4, lr}
+ ADD sp, sp, r2
+
+ CPSID i
+
+ ; Write the value read from ICCIAR to ICCEOIR
+ LDR r4, =ulICCEOIR
+ STR r0, [r4]
+
+ ; Restore the old nesting count
+ STR r1, [r3]
+
+ ; A context switch is never performed if the nesting count is not 0
+ CMP r1, #0
+ BNE exit_without_switch
+
+ ; Did the interrupt request a context switch? r1 holds the address of
+ ; ulPortYieldRequired and r0 the value of ulPortYieldRequired for future
+ ; use.
+ LDR r1, =ulPortYieldRequired
+ LDR r0, [r1]
+ CMP r0, #0
+ BNE switch_before_exit
+
+exit_without_switch
+ ; No context switch. Restore used registers, LR_irq and SPSR before
+ ; returning.
+ POP {r0-r4, r12}
+ CPS #IRQ_MODE
+ POP {LR}
+ MSR SPSR_cxsf, LR
+ POP {LR}
+ MOVS PC, LR
+
+switch_before_exit
+    ; A context switch is to be performed. Clear the context switch pending
+ ; flag.
+ MOV r0, #0
+ STR r0, [r1]
+
+    ; Restore used registers, LR_irq and SPSR before saving the context
+ ; to the task stack.
+ POP {r0-r4, r12}
+ CPS #IRQ_MODE
+ POP {LR}
+ MSR SPSR_cxsf, LR
+ POP {LR}
+ portSAVE_CONTEXT
+
+    ; Call the function that selects the new task to execute. The stack must
+    ; be 8-byte aligned before calling vTaskSwitchContext() in case it uses
+    ; LDRD or STRD instructions, or 8 byte aligned stack allocated data. LR
+    ; does not need saving as a new LR is loaded by portRESTORE_CONTEXT anyway.
+ LDR r0, =vTaskSwitchContext
+ BLX r0
+
+ ; Restore the context of, and branch to, the task selected to execute next.
+ portRESTORE_CONTEXT
+
+
+ END
diff --git a/Source/portable/RVDS/ARM_CA9/portmacro.h b/Source/portable/RVDS/ARM_CA9/portmacro.h
new file mode 100644
index 0000000..35e4887
--- /dev/null
+++ b/Source/portable/RVDS/ARM_CA9/portmacro.h
@@ -0,0 +1,169 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*-----------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *-----------------------------------------------------------
+ */
+
+/* Type definitions. */
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
+
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
+
+
+#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+
+ /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
+ not need to be guarded with a critical section. */
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
+/*-----------------------------------------------------------*/
+
+/* Hardware specifics. */
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
+
+/*-----------------------------------------------------------*/
+
+/* Task utilities. */
+
+/* Called at the end of an ISR that can cause a context switch. */
+#define portEND_SWITCHING_ISR( xSwitchRequired )\
+{ \
+extern uint32_t ulPortYieldRequired; \
+ \
+ if( xSwitchRequired != pdFALSE ) \
+ { \
+ ulPortYieldRequired = pdTRUE; \
+ } \
+}
+
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+#define portYIELD() __asm( "SWI 0" );
+
+
+/*-----------------------------------------------------------
+ * Critical section control
+ *----------------------------------------------------------*/
+
+extern void vPortEnterCritical( void );
+extern void vPortExitCritical( void );
+extern uint32_t ulPortSetInterruptMask( void );
+extern void vPortClearInterruptMask( uint32_t ulNewMaskValue );
+
+/* These macros do not globally disable/enable interrupts. They do mask off
+interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. */
+#define portENTER_CRITICAL() vPortEnterCritical();
+#define portEXIT_CRITICAL() vPortExitCritical();
+#define portDISABLE_INTERRUPTS() ulPortSetInterruptMask()
+#define portENABLE_INTERRUPTS() vPortClearInterruptMask( 0 )
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vPortClearInterruptMask(x)
+
+/*-----------------------------------------------------------*/
+
+/* Task function macros as described on the FreeRTOS.org WEB site. These are
+not required for this port but included in case common demo code that uses these
+macros is used. */
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
+
+/* Prototype of the FreeRTOS tick handler. This must be installed as the
+handler for whichever peripheral is used to generate the RTOS tick. */
+void FreeRTOS_Tick_Handler( void );
+
+/* Any task that uses the floating point unit MUST call vPortTaskUsesFPU()
+before any floating point instructions are executed. */
+void vPortTaskUsesFPU( void );
+#define portTASK_USES_FLOATING_POINT() vPortTaskUsesFPU()
+
+#define portLOWEST_INTERRUPT_PRIORITY ( ( ( uint32_t ) configUNIQUE_INTERRUPT_PRIORITIES ) - 1UL )
+#define portLOWEST_USABLE_INTERRUPT_PRIORITY ( portLOWEST_INTERRUPT_PRIORITY - 1UL )
+
+/* Architecture specific optimisations. */
+#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
+ #define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
+#endif
+
+#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
+
+ /* Store/clear the ready priorities in a bit map. */
+ #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
+ #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
+
+ /*-----------------------------------------------------------*/
+
+ #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31 - __clz( uxReadyPriorities ) )
+
+#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
+
+#ifdef configASSERT
+ void vPortValidateInterruptPriority( void );
+ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
+#endif
+
+#define portNOP() __nop()
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
+
+#endif /* PORTMACRO_H */
diff --git a/Source/portable/RVDS/ARM_CA9/portmacro.inc b/Source/portable/RVDS/ARM_CA9/portmacro.inc
new file mode 100644
index 0000000..cfcdc58
--- /dev/null
+++ b/Source/portable/RVDS/ARM_CA9/portmacro.inc
@@ -0,0 +1,120 @@
+;/*
+; * FreeRTOS Kernel V10.6.2
+; * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+; *
+; * SPDX-License-Identifier: MIT
+; *
+; * Permission is hereby granted, free of charge, to any person obtaining a copy of
+; * this software and associated documentation files (the "Software"), to deal in
+; * the Software without restriction, including without limitation the rights to
+; * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+; * the Software, and to permit persons to whom the Software is furnished to do so,
+; * subject to the following conditions:
+; *
+; * The above copyright notice and this permission notice shall be included in all
+; * copies or substantial portions of the Software.
+; *
+; * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+; * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+; * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+; * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+; * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+; * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+; *
+; * https://www.FreeRTOS.org
+; * https://github.com/FreeRTOS
+; *
+; */
+
+SYS_MODE EQU 0x1f
+SVC_MODE EQU 0x13
+IRQ_MODE EQU 0x12
+
+ IMPORT ulCriticalNesting
+ IMPORT pxCurrentTCB
+ IMPORT ulPortTaskHasFPUContext
+ IMPORT ulAsmAPIPriorityMask
+ IMPORT ulICCPMR
+
+
+ MACRO
+ portSAVE_CONTEXT
+
+ ; Save the LR and SPSR onto the system mode stack before switching to
+ ; system mode to save the remaining system mode registers
+ SRSDB sp!, #SYS_MODE
+ CPS #SYS_MODE
+ PUSH {R0-R12, R14}
+
+ ; Push the critical nesting count
+ LDR R2, =ulCriticalNesting
+ LDR R1, [R2]
+ PUSH {R1}
+
+ ; Does the task have a floating point context that needs saving? If
+ ; ulPortTaskHasFPUContext is 0 then no.
+ LDR R2, =ulPortTaskHasFPUContext
+ LDR R3, [R2]
+ CMP R3, #0
+
+ ; Save the floating point context, if any
+ FMRXNE R1, FPSCR
+ VPUSHNE {D0-D15}
+ VPUSHNE {D16-D31}
+ PUSHNE {R1}
+
+ ; Save ulPortTaskHasFPUContext itself
+ PUSH {R3}
+
+ ; Save the stack pointer in the TCB
+ LDR R0, =pxCurrentTCB
+ LDR R1, [R0]
+ STR SP, [R1]
+
+ MEND
+
+; /**********************************************************************/
+
+ MACRO
+ portRESTORE_CONTEXT
+
+ ; Set the SP to point to the stack of the task being restored.
+ LDR R0, =pxCurrentTCB
+ LDR R1, [R0]
+ LDR SP, [R1]
+
+ ; Is there a floating point context to restore? If the restored
+ ; ulPortTaskHasFPUContext is zero then no.
+ LDR R0, =ulPortTaskHasFPUContext
+ POP {R1}
+ STR R1, [R0]
+ CMP R1, #0
+
+ ; Restore the floating point context, if any
+ POPNE {R0}
+ VPOPNE {D16-D31}
+ VPOPNE {D0-D15}
+ VMSRNE FPSCR, R0
+
+ ; Restore the critical section nesting depth
+ LDR R0, =ulCriticalNesting
+ POP {R1}
+ STR R1, [R0]
+
+ ; Ensure the priority mask is correct for the critical nesting depth
+ LDR R2, =ulICCPMR
+ CMP R1, #0
+ MOVEQ R4, #255
+ LDRNE R4, =ulAsmAPIPriorityMask
+ STR R4, [R2]
+
+ ; Restore all system mode registers other than the SP (which is already
+ ; being used)
+ POP {R0-R12, R14}
+
+ ; Return to the task code, loading CPSR on the way.
+ RFEIA sp!
+
+ MEND
+
+ END
diff --git a/Source/portable/RVDS/ARM_CM0/port.c b/Source/portable/RVDS/ARM_CM0/port.c
index d82e33b..169e262 100644
--- a/Source/portable/RVDS/ARM_CM0/port.c
+++ b/Source/portable/RVDS/ARM_CM0/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
diff --git a/Source/portable/RVDS/ARM_CM0/portmacro.h b/Source/portable/RVDS/ARM_CM0/portmacro.h
index 8b4c0c4..e75c8ef 100644
--- a/Source/portable/RVDS/ARM_CM0/portmacro.h
+++ b/Source/portable/RVDS/ARM_CM0/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -47,74 +47,113 @@
*/
/* Type definitions. */
- #define portCHAR char
- #define portFLOAT float
- #define portDOUBLE double
- #define portLONG long
- #define portSHORT short
- #define portSTACK_TYPE uint32_t
- #define portBASE_TYPE long
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE uint32_t
+#define portBASE_TYPE long
- typedef portSTACK_TYPE StackType_t;
- typedef long BaseType_t;
- typedef unsigned long UBaseType_t;
+typedef portSTACK_TYPE StackType_t;
+typedef long BaseType_t;
+typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
- typedef uint16_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffff
- #else
- typedef uint32_t TickType_t;
- #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
+ typedef uint16_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffff
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
+ typedef uint32_t TickType_t;
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
- #define portTICK_TYPE_IS_ATOMIC 1
- #endif
+ #define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
+#endif
/*-----------------------------------------------------------*/
/* Architecture specifics. */
- #define portSTACK_GROWTH ( -1 )
- #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
- #define portBYTE_ALIGNMENT 8
+#define portSTACK_GROWTH ( -1 )
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
+#define portBYTE_ALIGNMENT 8
/*-----------------------------------------------------------*/
/* Scheduler utilities. */
- extern void vPortYield( void );
- #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
- #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
- #define portYIELD() vPortYield()
- #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 )
- #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+extern void vPortYield( void );
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portYIELD() vPortYield()
+#define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \
+ while( 0 )
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/* Critical section management. */
- extern void vPortEnterCritical( void );
- extern void vPortExitCritical( void );
- extern uint32_t ulSetInterruptMaskFromISR( void );
- extern void vClearInterruptMaskFromISR( uint32_t ulMask );
+extern void vPortEnterCritical( void );
+extern void vPortExitCritical( void );
+extern uint32_t ulSetInterruptMaskFromISR( void );
+extern void vClearInterruptMaskFromISR( uint32_t ulMask );
- #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
- #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x )
- #define portDISABLE_INTERRUPTS() __disable_irq()
- #define portENABLE_INTERRUPTS() __enable_irq()
- #define portENTER_CRITICAL() vPortEnterCritical()
- #define portEXIT_CRITICAL() vPortExitCritical()
+#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x )
+#define portDISABLE_INTERRUPTS() __disable_irq()
+#define portENABLE_INTERRUPTS() __enable_irq()
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/* Tickless idle/low power functionality. */
- #ifndef portSUPPRESS_TICKS_AND_SLEEP
- extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
- #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
- #endif
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+ extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+ #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. */
- #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
- #define portNOP()
+#define portNOP()
+
+#define portINLINE __inline
+
+#ifndef portFORCE_INLINE
+ #define portFORCE_INLINE __forceinline
+#endif
+
+/*-----------------------------------------------------------*/
+
+static portFORCE_INLINE BaseType_t xPortIsInsideInterrupt( void )
+{
+ uint32_t ulCurrentInterrupt;
+ BaseType_t xReturn;
+
+ /* Obtain the number of the currently executing interrupt. */
+ __asm
+ {
+/* *INDENT-OFF* */
+ mrs ulCurrentInterrupt, ipsr
+/* *INDENT-ON* */
+ }
+
+ if( ulCurrentInterrupt == 0 )
+ {
+ xReturn = pdFALSE;
+ }
+ else
+ {
+ xReturn = pdTRUE;
+ }
+
+ return xReturn;
+}
+
+/*-----------------------------------------------------------*/
/* *INDENT-OFF* */
#ifdef __cplusplus
diff --git a/Source/portable/RVDS/ARM_CM3/port.c b/Source/portable/RVDS/ARM_CM3/port.c
index 167ce78..d39491d 100644
--- a/Source/portable/RVDS/ARM_CM3/port.c
+++ b/Source/portable/RVDS/ARM_CM3/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -34,10 +34,6 @@
#include "FreeRTOS.h"
#include "task.h"
-#ifndef configKERNEL_INTERRUPT_PRIORITY
- #define configKERNEL_INTERRUPT_PRIORITY 255
-#endif
-
#if configMAX_SYSCALL_INTERRUPT_PRIORITY == 0
#error configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See http: /*www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
#endif
@@ -65,8 +61,9 @@
#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
/* Constants required to check the validity of an interrupt priority. */
#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
@@ -266,7 +263,8 @@
{
#if ( configASSERT_DEFINED == 1 )
{
- volatile uint32_t ulOriginalPriority;
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
volatile uint8_t ucMaxPriorityValue;
@@ -276,7 +274,7 @@
* ensure interrupt entry is as fast and simple as possible.
*
* Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
/* Determine the number of priority bits available. First write to all
* possible bits. */
@@ -285,40 +283,56 @@
/* Read the value back to see how many bits stuck. */
ucMaxPriorityValue = *pucFirstUserPriorityRegister;
- /* The kernel interrupt priority should be set to the lowest
- * priority. */
- configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) );
-
/* Use the same mask on the maximum system call priority. */
ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
/* Calculate the maximum acceptable priority group value for the number
* of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- ulMaxPRIGROUPValue--;
+ ulImplementedPrioBits++;
ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
- #ifdef __NVIC_PRIO_BITS
+ if( ulImplementedPrioBits == 8 )
{
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
}
- #endif
-
- #ifdef configPRIO_BITS
+ else
{
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
}
- #endif
/* Shift the priority group value back to its position within the AIRCR
* register. */
@@ -327,7 +341,7 @@
/* Restore the clobbered interrupt priority register to its original
* value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
}
#endif /* configASSERT_DEFINED */
@@ -739,10 +753,10 @@
* be set to a value equal to or numerically *higher* than
* configMAX_SYSCALL_INTERRUPT_PRIORITY.
*
- * Interrupts that use the FreeRTOS API must not be left at their
- * default priority of zero as that is the highest possible priority,
+ * Interrupts that use the FreeRTOS API must not be left at their
+ * default priority of zero as that is the highest possible priority,
* which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
- * and therefore also guaranteed to be invalid.
+ * and therefore also guaranteed to be invalid.
*
* FreeRTOS maintains separate thread and ISR API functions to ensure
* interrupt entry is as fast and simple as possible.
diff --git a/Source/portable/RVDS/ARM_CM3/portmacro.h b/Source/portable/RVDS/ARM_CM3/portmacro.h
index db1c44e..2455220 100644
--- a/Source/portable/RVDS/ARM_CM3/portmacro.h
+++ b/Source/portable/RVDS/ARM_CM3/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -59,16 +59,18 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
+ #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
- #else
+ #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+ #else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
diff --git a/Source/portable/RVDS/ARM_CM4F/port.c b/Source/portable/RVDS/ARM_CM4F/port.c
index 9c0892a..05ef20c 100644
--- a/Source/portable/RVDS/ARM_CM4F/port.c
+++ b/Source/portable/RVDS/ARM_CM4F/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -71,8 +71,9 @@
#define portCORTEX_M7_r0p1_ID ( 0x410FC271UL )
#define portCORTEX_M7_r0p0_ID ( 0x410FC270UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
/* Constants required to check the validity of an interrupt priority. */
#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
@@ -328,7 +329,8 @@
#if ( configASSERT_DEFINED == 1 )
{
- volatile uint32_t ulOriginalPriority;
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
volatile uint8_t ucMaxPriorityValue;
@@ -338,7 +340,7 @@
* ensure interrupt entry is as fast and simple as possible.
*
* Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
/* Determine the number of priority bits available. First write to all
* possible bits. */
@@ -347,40 +349,56 @@
/* Read the value back to see how many bits stuck. */
ucMaxPriorityValue = *pucFirstUserPriorityRegister;
- /* The kernel interrupt priority should be set to the lowest
- * priority. */
- configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) );
-
/* Use the same mask on the maximum system call priority. */
ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
/* Calculate the maximum acceptable priority group value for the number
* of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- ulMaxPRIGROUPValue--;
+ ulImplementedPrioBits++;
ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
- #ifdef __NVIC_PRIO_BITS
+ if( ulImplementedPrioBits == 8 )
{
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
}
- #endif
-
- #ifdef configPRIO_BITS
+ else
{
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
}
- #endif
/* Shift the priority group value back to its position within the AIRCR
* register. */
@@ -389,7 +407,7 @@
/* Restore the clobbered interrupt priority register to its original
* value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
}
#endif /* configASSERT_DEFINED */
diff --git a/Source/portable/RVDS/ARM_CM4F/portmacro.h b/Source/portable/RVDS/ARM_CM4F/portmacro.h
index 21ac481..d79c9b4 100644
--- a/Source/portable/RVDS/ARM_CM4F/portmacro.h
+++ b/Source/portable/RVDS/ARM_CM4F/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -59,16 +59,18 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
+ #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
- #else
+ #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+ #else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
diff --git a/Source/portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c b/Source/portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..80f0ee1
--- /dev/null
+++ b/Source/portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c
@@ -0,0 +1,1750 @@
+/*
+ * FreeRTOS Kernel V10.6.2
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+#include "mpu_syscall_numbers.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskDelayUntilImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+MPU_xTaskDelayUntil_Priv
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntil_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskDelayUntil
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskAbortDelayImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+MPU_xTaskAbortDelay_Priv
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelay_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskAbortDelay
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskDelayImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+MPU_vTaskDelay_Priv
+ pop {r0}
+ b MPU_vTaskDelayImpl
+MPU_vTaskDelay_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskDelay
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_uxTaskPriorityGetImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+MPU_uxTaskPriorityGet_Priv
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGet_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskPriorityGet
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_eTaskGetStateImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+MPU_eTaskGetState_Priv
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+MPU_eTaskGetState_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_eTaskGetState
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskGetInfoImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+MPU_vTaskGetInfo_Priv
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfo_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskGetInfo
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) FREERTOS_SYSTEM_CALL;
+
+__asm TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskGetIdleTaskHandleImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+MPU_xTaskGetIdleTaskHandle_Priv
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandle_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskSuspendImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+MPU_vTaskSuspend_Priv
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+MPU_vTaskSuspend_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSuspend
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskResumeImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+MPU_vTaskResume_Priv
+ pop {r0}
+ b MPU_vTaskResumeImpl
+MPU_vTaskResume_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskResume
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) FREERTOS_SYSTEM_CALL;
+
+__asm TickType_t MPU_xTaskGetTickCount( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskGetTickCountImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+MPU_xTaskGetTickCount_Priv
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCount_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetTickCount
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) FREERTOS_SYSTEM_CALL;
+
+__asm UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_uxTaskGetNumberOfTasksImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+MPU_uxTaskGetNumberOfTasks_Priv
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasks_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) FREERTOS_SYSTEM_CALL;
+
+__asm char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_pcTaskGetNameImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+MPU_pcTaskGetName_Priv
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+MPU_pcTaskGetName_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_pcTaskGetName
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_ulTaskGetRunTimeCounterImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_ulTaskGetRunTimeCounter_Unpriv
+MPU_ulTaskGetRunTimeCounter_Priv
+    pop {r0}
+    b MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounter_Unpriv
+    pop {r0}
+    svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_ulTaskGetRunTimePercentImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_ulTaskGetRunTimePercent_Unpriv
+MPU_ulTaskGetRunTimePercent_Priv
+    pop {r0}
+    b MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercent_Unpriv
+    pop {r0}
+    svc #SYSTEM_CALL_ulTaskGetRunTimePercent
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) FREERTOS_SYSTEM_CALL;
+
+__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_ulTaskGetIdleRunTimePercentImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+MPU_ulTaskGetIdleRunTimePercent_Priv
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) FREERTOS_SYSTEM_CALL;
+
+__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+MPU_ulTaskGetIdleRunTimeCounter_Priv
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskSetApplicationTaskTagImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+MPU_vTaskSetApplicationTaskTag_Priv
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTag_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskGetApplicationTaskTagImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+MPU_xTaskGetApplicationTaskTag_Priv
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTag_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+MPU_vTaskSetThreadLocalStoragePointer_Priv
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) FREERTOS_SYSTEM_CALL;
+
+__asm void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+MPU_pvTaskGetThreadLocalStoragePointer_Priv
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) FREERTOS_SYSTEM_CALL;
+
+__asm UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_uxTaskGetSystemStateImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+MPU_uxTaskGetSystemState_Priv
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemState_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetSystemState
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_uxTaskGetStackHighWaterMarkImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+MPU_uxTaskGetStackHighWaterMark_Priv
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMark_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_uxTaskGetStackHighWaterMark2Impl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+MPU_uxTaskGetStackHighWaterMark2_Priv
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL;
+
+__asm TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskGetCurrentTaskHandleImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+MPU_xTaskGetCurrentTaskHandle_Priv
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandle_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xTaskGetSchedulerState( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskGetSchedulerStateImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+MPU_xTaskGetSchedulerState_Priv
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerState_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGetSchedulerState
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskSetTimeOutStateImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+MPU_vTaskSetTimeOutState_Priv
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutState_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_vTaskSetTimeOutState
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskCheckForTimeOutImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+MPU_xTaskCheckForTimeOut_Priv
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOut_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskCheckForTimeOut
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskGenericNotifyImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+MPU_xTaskGenericNotify_Priv
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotify_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotify
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskGenericNotifyWaitImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+MPU_xTaskGenericNotifyWait_Priv
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWait_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyWait
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+__asm uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_ulTaskGenericNotifyTakeImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+MPU_ulTaskGenericNotifyTake_Priv
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTake_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyTake
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskGenericNotifyStateClearImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+MPU_xTaskGenericNotifyStateClear_Priv
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClear_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) FREERTOS_SYSTEM_CALL;
+
+__asm uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_ulTaskGenericNotifyValueClearImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+MPU_ulTaskGenericNotifyValueClear_Priv
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClear_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xQueueGenericSendImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+MPU_xQueueGenericSend_Priv
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSend_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGenericSend
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
+
+__asm UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_uxQueueMessagesWaitingImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+MPU_uxQueueMessagesWaiting_Priv
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaiting_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueMessagesWaiting
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
+
+__asm UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_uxQueueSpacesAvailableImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+MPU_uxQueueSpacesAvailable_Priv
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailable_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_uxQueueSpacesAvailable
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xQueueReceiveImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+MPU_xQueueReceive_Priv
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+MPU_xQueueReceive_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueReceive
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xQueuePeekImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+MPU_xQueuePeek_Priv
+ pop {r0}
+ b MPU_xQueuePeekImpl
+MPU_xQueuePeek_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xQueuePeek
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xQueueSemaphoreTakeImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+MPU_xQueueSemaphoreTake_Priv
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTake_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSemaphoreTake
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) FREERTOS_SYSTEM_CALL;
+
+__asm TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xQueueGetMutexHolderImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+MPU_xQueueGetMutexHolder_Priv
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolder_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGetMutexHolder
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xQueueTakeMutexRecursiveImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+MPU_xQueueTakeMutexRecursive_Priv
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursive_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueTakeMutexRecursive
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xQueueGiveMutexRecursiveImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+MPU_xQueueGiveMutexRecursive_Priv
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursive_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueGiveMutexRecursive
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+__asm QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xQueueSelectFromSetImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+MPU_xQueueSelectFromSet_Priv
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSet_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueSelectFromSet
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xQueueAddToSetImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+MPU_xQueueAddToSet_Priv
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSet_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xQueueAddToSet
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vQueueAddToRegistryImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+MPU_vQueueAddToRegistry_Priv
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistry_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueAddToRegistry
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vQueueUnregisterQueueImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+MPU_vQueueUnregisterQueue_Priv
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueue_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_vQueueUnregisterQueue
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
+
+__asm const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_pcQueueGetNameImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+MPU_pcQueueGetName_Priv
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+MPU_pcQueueGetName_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_pcQueueGetName
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+__asm void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_pvTimerGetTimerIDImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+MPU_pvTimerGetTimerID_Priv
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerID_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_pvTimerGetTimerID
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTimerSetTimerIDImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+MPU_vTimerSetTimerID_Priv
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerID_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetTimerID
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTimerIsTimerActiveImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+MPU_xTimerIsTimerActive_Priv
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActive_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerIsTimerActive
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) FREERTOS_SYSTEM_CALL;
+
+__asm TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+MPU_xTimerGetTimerDaemonTaskHandle_Priv
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTimerGenericCommandPrivImpl
+
+ push {r0}
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+MPU_xTimerGenericCommand_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGenericCommand
+MPU_xTimerGenericCommand_Priv
+ pop {r0}
+ b MPU_xTimerGenericCommandPrivImpl
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+__asm const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_pcTimerGetNameImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+MPU_pcTimerGetName_Priv
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+MPU_pcTimerGetName_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_pcTimerGetName
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTimerSetReloadModeImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+MPU_vTimerSetReloadMode_Priv
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadMode_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_vTimerSetReloadMode
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTimerGetReloadModeImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+MPU_xTimerGetReloadMode_Priv
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadMode_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetReloadMode
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+__asm UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_uxTimerGetReloadModeImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+MPU_uxTimerGetReloadMode_Priv
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadMode_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_uxTimerGetReloadMode
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+__asm TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTimerGetPeriodImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+MPU_xTimerGetPeriod_Priv
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriod_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetPeriod
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+__asm TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTimerGetExpiryTimeImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+MPU_xTimerGetExpiryTime_Priv
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTime_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xTimerGetExpiryTime
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) FREERTOS_SYSTEM_CALL;
+
+__asm EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xEventGroupWaitBitsImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+MPU_xEventGroupWaitBits_Priv
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBits_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupWaitBits
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) FREERTOS_SYSTEM_CALL;
+
+__asm EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xEventGroupClearBitsImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+MPU_xEventGroupClearBits_Priv
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBits_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupClearBits
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) FREERTOS_SYSTEM_CALL;
+
+__asm EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xEventGroupSetBitsImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+MPU_xEventGroupSetBits_Priv
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBits_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSetBits
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+__asm EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xEventGroupSyncImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+MPU_xEventGroupSync_Priv
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+MPU_xEventGroupSync_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xEventGroupSync
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) FREERTOS_SYSTEM_CALL;
+
+__asm UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_uxEventGroupGetNumberImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+MPU_uxEventGroupGetNumber_Priv
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumber_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_uxEventGroupGetNumber
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vEventGroupSetNumberImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+MPU_vEventGroupSetNumber_Priv
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumber_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_vEventGroupSetNumber
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+__asm size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferSendImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+MPU_xStreamBufferSend_Priv
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSend_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSend
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+__asm size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferReceiveImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+MPU_xStreamBufferReceive_Priv
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceive_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferReceive
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferIsFullImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+MPU_xStreamBufferIsFull_Priv
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFull_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsFull
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferIsEmptyImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+MPU_xStreamBufferIsEmpty_Priv
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmpty_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferIsEmpty
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+
+__asm size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferSpacesAvailableImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+MPU_xStreamBufferSpacesAvailable_Priv
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailable_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+
+__asm size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferBytesAvailableImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+MPU_xStreamBufferBytesAvailable_Priv
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailable_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferBytesAvailable
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferSetTriggerLevelImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+MPU_xStreamBufferSetTriggerLevel_Priv
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevel_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+
+__asm size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferNextMessageLengthBytesImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+MPU_xStreamBufferNextMessageLengthBytes_Priv
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ pop {r0}
+ svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
+}
+/*-----------------------------------------------------------*/
+
+#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
diff --git a/Source/portable/RVDS/ARM_CM4_MPU/port.c b/Source/portable/RVDS/ARM_CM4_MPU/port.c
index d9dbf66..b5c4cb9 100644
--- a/Source/portable/RVDS/ARM_CM4_MPU/port.c
+++ b/Source/portable/RVDS/ARM_CM4_MPU/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -38,6 +38,7 @@
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "task.h"
+#include "mpu_syscall_numbers.h"
#ifndef __TARGET_FPU_VFP
#error This port can only be used when the project options are configured to enable hardware floating point support.
@@ -83,8 +84,9 @@
#define portNVIC_SYSTICK_CLK ( 0x00000004UL )
#define portNVIC_SYSTICK_INT ( 0x00000002UL )
#define portNVIC_SYSTICK_ENABLE ( 0x00000001UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
#define portNVIC_SVC_PRI ( ( ( uint32_t ) configMAX_SYSCALL_INTERRUPT_PRIORITY - 1UL ) << 24UL )
/* Constants required to manipulate the VFP. */
@@ -107,17 +109,47 @@
#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
#define portPRIGROUP_SHIFT ( 8UL )
+/* Constants used during system call enter and exit. */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+
/* Offsets in the stack to the parameters when inside the SVC handler. */
+#define portOFFSET_TO_LR ( 5 )
#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
/* For strict compliance with the Cortex-M spec the task start address should
* have bit-0 clear, as it is loaded into the PC on exit from an ISR. */
#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL )
+/* Does addr lie within [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
/* Each task maintains its own interrupt status in the critical nesting
* variable. Note this is not saved as part of the task context as context
* switches can only occur when uxCriticalNesting is zero. */
-static UBaseType_t uxCriticalNesting = 0xaaaaaaaa;
+PRIVILEGED_DATA static UBaseType_t uxCriticalNesting = 0xaaaaaaaa;
+
+#if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+/*
+ * This variable is set to pdTRUE when the scheduler is started.
+ */
+ PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
+
+#endif
/*
* Setup the timer to generate the tick interrupts.
@@ -157,7 +189,7 @@
* C portion of the SVC handler. The SVC handler is split between an asm entry
* and a C wrapper for simplicity of coding and maintenance.
*/
-void prvSVCHandler( uint32_t * pulRegisters ) __attribute__( ( used ) ) PRIVILEGED_FUNCTION;
+void vSVCHandler_C( uint32_t * pulRegisters ) __attribute__( ( used ) ) PRIVILEGED_FUNCTION;
/*
* Function to enable the VFP.
@@ -200,7 +232,7 @@
/**
* @brief Enter critical section.
*/
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
void vPortEnterCritical( void ) FREERTOS_SYSTEM_CALL;
#else
void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
@@ -209,11 +241,66 @@
/**
* @brief Exit from critical section.
*/
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
void vPortExitCritical( void ) FREERTOS_SYSTEM_CALL;
#else
void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
#endif
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+/**
+ * @brief Triggers lazy stacking of FPU registers.
+ */
+ static void prvTriggerLazyStacking( void ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+/**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ * @param ucSystemCallNumber The system call number of the system call.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+/**
+ * @brief Raise SVC for exiting from a system call.
+ */
+ void vRequestSystemCallExit( void ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/
/*
@@ -222,48 +309,68 @@
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged )
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings )
{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = 0; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
-
- /* A save method is being used that requires each task to maintain its
- * own exec return value. */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */
-
if( xRunPrivileged == pdTRUE )
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED;
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED;
}
else
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED;
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED;
}
+ xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */
+ xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */
+ xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */
+ xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */
+ xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */
+ xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */
+ xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */
+ xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */
+ xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
- return pxTopOfStack;
+ xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */
+ xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */
+ xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */
+ xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */
+ xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */
+ xMPUSettings->ulContext[ 16 ] = 0; /* LR. */
+ xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */
+ xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is non-NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ return &( xMPUSettings->ulContext[ 19 ] );
}
/*-----------------------------------------------------------*/
-void prvSVCHandler( uint32_t * pulParam )
+void vSVCHandler_C( uint32_t * pulParam )
{
uint8_t ucSVCNumber;
- uint32_t ulReg, ulPC;
+ uint32_t ulPC;
- #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
+ #if ( configUSE_MPU_WRAPPERS_V1 == 1 )
+ uint32_t ulReg;
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+ #if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) )
extern uint32_t __syscalls_flash_start__;
extern uint32_t __syscalls_flash_end__;
- #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ #endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) ) */
/* The stack contains: r0, r1, r2, r3, r12, LR, PC and xPSR. The first
* argument (r0) is pulParam[ 0 ]. */
@@ -289,50 +396,328 @@
break;
- #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
- case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the
- * svc was raised from any of the
- * system calls. */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 1 )
+ #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
+ case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the
+ * svc was raised from any of the
+ * system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- __asm
- {
-/* *INDENT-OFF* */
- mrs ulReg, control /* Obtain current control value. */
- bic ulReg, # 1 /* Set privilege bit. */
- msr control, ulReg /* Write back new control value. */
-/* *INDENT-ON* */
- }
- }
-
- break;
- #else /* if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
- case portSVC_RAISE_PRIVILEGE:
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
__asm
{
-/* *INDENT-OFF* */
+ /* *INDENT-OFF* */
mrs ulReg, control /* Obtain current control value. */
bic ulReg, # 1 /* Set privilege bit. */
msr control, ulReg /* Write back new control value. */
-/* *INDENT-ON* */
+ /* *INDENT-ON* */
}
- break;
- #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ }
- default: /* Unknown SVC call. */
- break;
+ break;
+ #else /* if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ case portSVC_RAISE_PRIVILEGE:
+ __asm
+ {
+ /* *INDENT-OFF* */
+ mrs ulReg, control /* Obtain current control value. */
+ bic ulReg, # 1 /* Set privilege bit. */
+ msr control, ulReg /* Write back new control value. */
+ /* *INDENT-ON* */
+ }
+ break;
+ #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
+
+ default: /* Unknown SVC call. */
+ break;
}
}
/*-----------------------------------------------------------*/
-__asm void vPortSVCHandler( void )
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ __asm void prvTriggerLazyStacking( void ) /* PRIVILEGED_FUNCTION */
+ {
+ /* *INDENT-OFF* */
+ PRESERVE8
+
+ vpush {s0} /* Trigger lazy stacking. */
+ vpop {s0} /* Nullify the effect of the above instruction. */
+
+ /* *INDENT-ON* */
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vSystemCallEnter( uint32_t * pulTaskStack,
+ uint32_t ulLR,
+ uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i, r1;
+ extern uint32_t __syscalls_flash_start__;
+ extern uint32_t __syscalls_flash_end__;
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the system call section (i.e. application is
+ * not raising SVC directly).
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
+ * it is non-NULL only during the execution of a system call (i.e.
+ * between system call enter and exit).
+ * 3. System call is not for a kernel API disabled by the configuration
+ * in FreeRTOSConfig.h.
+ * 4. We do not need to check that ucSystemCallNumber is within range
+ * because the assembly SVC handler checks that before calling
+ * this function.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
+ ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
+ {
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ prvTriggerLazyStacking();
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm
+ {
+ msr psp, pulSystemCallStack
+ };
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm
+ {
+ mrs r1, control /* Obtain current control value. */
+ bic r1, #1 /* Clear nPRIV bit. */
+ msr control, r1 /* Write back new control value. */
+ };
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Store the value of the Link Register before the SVC was raised.
+ * It contains the address of the caller of the System Call entry
+ * point (i.e. the caller of the MPU_<API>). We need to restore it
+ * when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Start executing the system call upon returning from this handler. */
+ pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
+
+ /* Raise a request to exit from the system call upon finishing the
+ * system call. */
+ pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ __asm void vRequestSystemCallExit( void ) /* PRIVILEGED_FUNCTION */
+ {
+ PRESERVE8
+
+ svc #portSVC_SYSTEM_CALL_EXIT
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ void vSystemCallExit( uint32_t * pulSystemCallStack,
+ uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+ {
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i, r1;
+ extern uint32_t __privileged_functions_start__;
+ extern uint32_t __privileged_functions_end__;
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+
+ /* Checks:
+ * 1. SVC is raised from the privileged code (i.e. application is not
+ * raising SVC directly). This SVC is only raised from
+ * vRequestSystemCallExit which is in the privileged code section.
+ * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
+ * this means that we previously entered a system call and the
+ * application is not attempting to exit without entering a system
+ * call.
+ */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
+ ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
+ {
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ prvTriggerLazyStacking();
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm
+ {
+ msr psp, pulTaskStack
+ };
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm
+ {
+ mrs r1, control /* Obtain current control value. */
+ orr r1, #1 /* Set nPRIV bit. */
+ msr control, r1 /* Write back new control value. */
+ };
+
+ /* Return to the caller of the System Call entry point (i.e. the
+ * caller of the MPU_<API>). */
+ pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ /* Ensure that LR has a valid value. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is non-NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ }
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
- extern prvSVCHandler
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
/* *INDENT-OFF* */
- PRESERVE8
+__asm void vPortSVCHandler( void )
+{
+ extern vSVCHandler_C
+ extern vSystemCallEnter
+ extern vSystemCallExit
+
+ PRESERVE8
+
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #NUM_SYSTEM_CALLS
+ blt syscall_enter
+ cmp r2, #portSVC_SYSTEM_CALL_EXIT
+ beq syscall_exit
+ b vSVCHandler_C
+
+syscall_enter
+ mov r1, lr
+ b vSystemCallEnter
+
+syscall_exit
+ mov r1, lr
+ b vSystemCallExit
+}
+
+#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+__asm void vPortSVCHandler( void )
+{
+ extern vSVCHandler_C
+
+ PRESERVE8
/* Assumes psp was in use. */
#ifndef USE_PROCESS_STACK /* Code should not be required if a main() is using the process stack. */
@@ -344,9 +729,11 @@
mrs r0, psp
#endif
- b prvSVCHandler
-/* *INDENT-ON* */
+ b vSVCHandler_C
}
+/* *INDENT-ON* */
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
__asm void prvRestoreContextOfFirstTask( void )
@@ -354,45 +741,54 @@
/* *INDENT-OFF* */
PRESERVE8
- ldr r0, =0xE000ED08 /* Use the NVIC offset register to locate the stack. */
- ldr r0, [ r0 ]
- ldr r0, [ r0 ]
- msr msp, r0 /* Set the msp back to the start of the stack. */
- ldr r3, =pxCurrentTCB /* Restore the context. */
- ldr r1, [ r3 ]
- ldr r0, [ r1 ] /* The first item in the TCB is the task top of stack. */
- add r1, r1, #4 /* Move onto the second item in the TCB... */
+ ldr r0, =0xE000ED08 /* Use the NVIC offset register to locate the stack. */
+ ldr r0, [r0]
+ ldr r0, [r0]
+ msr msp, r0 /* Set the msp back to the start of the stack. */
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */
- bic r3, r3, # 1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- str r3, [ r2 ] /* Disable MPU. */
+ /*------------ Program MPU. ------------ */
+ ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */
- ldr r2, =0xe000ed9c /* Region Base Address register. */
- ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
- stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ bic r3, r3, # 1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ str r3, [r0] /* Disable MPU. */
+
+ ldr r0, =0xe000ed9c /* Region Base Address register. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
#if ( configTOTAL_MPU_REGIONS == 16 )
- ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
- stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
- ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
- stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
#endif /* configTOTAL_MPU_REGIONS == 16. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */
- orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- str r3, [ r2 ] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ str r3, [r0] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
- ldmia r0 !, { r3 - r11, r14 } /* Pop the registers that are not automatically saved on exception entry. */
+ /*---------- Restore Context. ---------- */
+ ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ ldr r1, [r2] /* r1 = Location of saved context in TCB. */
+
+ ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ msr psp, r0
+ stmia r0, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */
msr control, r3
- msr psp, r0 /* Restore the task stack pointer. */
+ str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */
+
mov r0, #0
msr basepri, r0
- bx r14
- nop
+ bx lr
/* *INDENT-ON* */
}
/*-----------------------------------------------------------*/
@@ -419,66 +815,87 @@
#endif
#if ( configASSERT_DEFINED == 1 )
+ {
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
+ volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
+ volatile uint8_t ucMaxPriorityValue;
+
+ /* Determine the maximum priority from which ISR safe FreeRTOS API
+ * functions can be called. ISR safe functions are those that end in
+ * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
+ * ensure interrupt entry is as fast and simple as possible.
+ *
+ * Save the interrupt priority value that is about to be clobbered. */
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
+
+ /* Determine the number of priority bits available. First write to all
+ * possible bits. */
+ *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
+
+ /* Read the value back to see how many bits stuck. */
+ ucMaxPriorityValue = *pucFirstUserPriorityRegister;
+
+ /* Use the same mask on the maximum system call priority. */
+ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
+ /* Calculate the maximum acceptable priority group value for the number
+ * of bits read back. */
+
+ while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- volatile uint32_t ulOriginalPriority;
- volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
- volatile uint8_t ucMaxPriorityValue;
-
- /* Determine the maximum priority from which ISR safe FreeRTOS API
- * functions can be called. ISR safe functions are those that end in
- * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
- * ensure interrupt entry is as fast and simple as possible.
- *
- * Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
-
- /* Determine the number of priority bits available. First write to all
- * possible bits. */
- *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
-
- /* Read the value back to see how many bits stuck. */
- ucMaxPriorityValue = *pucFirstUserPriorityRegister;
-
- /* Use the same mask on the maximum system call priority. */
- ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
-
- /* Calculate the maximum acceptable priority group value for the number
- * of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
-
- while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
- {
- ulMaxPRIGROUPValue--;
- ucMaxPriorityValue <<= ( uint8_t ) 0x01;
- }
-
- #ifdef __NVIC_PRIO_BITS
- {
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
- }
- #endif
-
- #ifdef configPRIO_BITS
- {
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
- }
- #endif
-
- /* Shift the priority group value back to its position within the AIRCR
- * register. */
- ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
- ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
-
- /* Restore the clobbered interrupt priority register to its original
- * value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ ulImplementedPrioBits++;
+ ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
+
+ if( ulImplementedPrioBits == 8 )
+ {
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
+ }
+ else
+ {
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
+ }
+
+ /* Shift the priority group value back to its position within the AIRCR
+ * register. */
+ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+ ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+ /* Restore the clobbered interrupt priority register to its original
+ * value. */
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
+ }
#endif /* configASSERT_DEFINED */
/* Make PendSV and SysTick the same priority as the kernel, and the SVC
@@ -497,6 +914,12 @@
/* Initialise the critical nesting count ready for the first task. */
uxCriticalNesting = 0;
+ #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+ {
+ xSchedulerRunning = pdTRUE;
+ }
+ #endif
+
/* Ensure the VFP is enabled - it should be anyway. */
vPortEnableVFP();
@@ -550,39 +973,63 @@
void vPortEnterCritical( void )
{
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
- if( portIS_PRIVILEGED() == pdFALSE )
- {
- portRAISE_PRIVILEGE();
- portMEMORY_BARRIER();
+ #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+ if( portIS_PRIVILEGED() == pdFALSE )
+ {
+ portRAISE_PRIVILEGE();
+ portMEMORY_BARRIER();
+ portDISABLE_INTERRUPTS();
+ uxCriticalNesting++;
+ portMEMORY_BARRIER();
+
+ portRESET_PRIVILEGE();
+ portMEMORY_BARRIER();
+ }
+ else
+ {
+ portDISABLE_INTERRUPTS();
+ uxCriticalNesting++;
+ }
+ #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
portDISABLE_INTERRUPTS();
uxCriticalNesting++;
- portMEMORY_BARRIER();
-
- portRESET_PRIVILEGE();
- portMEMORY_BARRIER();
- }
- else
- {
- portDISABLE_INTERRUPTS();
- uxCriticalNesting++;
- }
-#else
- portDISABLE_INTERRUPTS();
- uxCriticalNesting++;
-#endif
+ #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
}
/*-----------------------------------------------------------*/
void vPortExitCritical( void )
{
-#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
- if( portIS_PRIVILEGED() == pdFALSE )
- {
- portRAISE_PRIVILEGE();
- portMEMORY_BARRIER();
+ #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
+ if( portIS_PRIVILEGED() == pdFALSE )
+ {
+ portRAISE_PRIVILEGE();
+ portMEMORY_BARRIER();
+ configASSERT( uxCriticalNesting );
+ uxCriticalNesting--;
+
+ if( uxCriticalNesting == 0 )
+ {
+ portENABLE_INTERRUPTS();
+ }
+
+ portMEMORY_BARRIER();
+
+ portRESET_PRIVILEGE();
+ portMEMORY_BARRIER();
+ }
+ else
+ {
+ configASSERT( uxCriticalNesting );
+ uxCriticalNesting--;
+
+ if( uxCriticalNesting == 0 )
+ {
+ portENABLE_INTERRUPTS();
+ }
+ }
+ #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
configASSERT( uxCriticalNesting );
uxCriticalNesting--;
@@ -590,30 +1037,7 @@
{
portENABLE_INTERRUPTS();
}
- portMEMORY_BARRIER();
-
- portRESET_PRIVILEGE();
- portMEMORY_BARRIER();
- }
- else
- {
- configASSERT( uxCriticalNesting );
- uxCriticalNesting--;
-
- if( uxCriticalNesting == 0 )
- {
- portENABLE_INTERRUPTS();
- }
- }
-#else
- configASSERT( uxCriticalNesting );
- uxCriticalNesting--;
-
- if( uxCriticalNesting == 0 )
- {
- portENABLE_INTERRUPTS();
- }
-#endif
+ #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
}
/*-----------------------------------------------------------*/
@@ -626,72 +1050,90 @@
/* *INDENT-OFF* */
PRESERVE8
+ ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ ldr r1, [r2] /* r1 = Location where the context should be saved. */
+
+ /*------------ Save Context. ----------- */
+ mrs r3, control
mrs r0, psp
+ isb
- ldr r3, =pxCurrentTCB /* Get the location of the current TCB. */
- ldr r2, [ r3 ]
+ add r0, r0, #0x20 /* Move r0 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r1!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r0, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */
+ sub r0, r0, #0x20 /* Set r0 back to the location of hardware saved context. */
- tst r14, #0x10 /* Is the task using the FPU context? If so, push high vfp registers. */
- it eq
- vstmdbeq r0 !, { s16 - s31 }
+ stmia r1!, {r3-r11, lr} /* Store CONTROL register, r4-r11 and LR. */
+ ldmia r0, {r4-r11} /* Copy hardware saved context into r4-r11. */
+ stmia r1!, {r0, r4-r11} /* Store original PSP (after hardware has saved context) and the hardware saved context. */
+ str r1, [r2] /* Save the location from where the context should be restored as the first member of TCB. */
- mrs r1, control
- stmdb r0 !, { r1, r4 - r11, r14 } /* Save the remaining registers. */
- str r0, [ r2 ] /* Save the new top of stack into the first member of the TCB. */
-
- stmdb sp !, { r0, r3 }
- mov r0, # configMAX_SYSCALL_INTERRUPT_PRIORITY
- #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- #endif
+ /*---------- Select next task. --------- */
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
+ cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+#endif
msr basepri, r0
dsb
isb
- #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- #endif
+#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
+ cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+#endif
bl vTaskSwitchContext
mov r0, #0
msr basepri, r0
- ldmia sp !, { r0, r3 }
- /* Restore the context. */
- ldr r1, [ r3 ]
- ldr r0, [ r1 ] /* The first item in the TCB is the task top of stack. */
- add r1, r1, #4 /* Move onto the second item in the TCB... */
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */
- bic r3, r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- str r3, [ r2 ] /* Disable MPU. */
+ /*------------ Program MPU. ------------ */
+ ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */
- ldr r2, =0xe000ed9c /* Region Base Address register. */
- ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
- stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ bic r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ str r3, [r0] /* Disable MPU. */
- #if ( configTOTAL_MPU_REGIONS == 16 )
- ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
- stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
- ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
- stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
- #endif /* configTOTAL_MPU_REGIONS == 16. */
+ ldr r0, =0xe000ed9c /* Region Base Address register. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */
- orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- str r3, [ r2 ] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
+#if ( configTOTAL_MPU_REGIONS == 16 )
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+#endif /* configTOTAL_MPU_REGIONS == 16. */
- ldmia r0 !, { r3 - r11, r14 } /* Pop the registers that are not automatically saved on exception entry. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ orr r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ str r3, [r0] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ /*---------- Restore Context. ---------- */
+ ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ ldr r1, [r2] /* r1 = Location of saved context in TCB. */
+
+ ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ msr psp, r0
+ stmia r0!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */
msr control, r3
- tst r14, #0x10 /* Is the task using the FPU context? If so, pop the high vfp registers too. */
- it eq
- vldmiaeq r0 !, { s16 - s31 }
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r0!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */
- msr psp, r0
- bx r14
- nop
+ str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
/* *INDENT-ON* */
}
/*-----------------------------------------------------------*/
@@ -753,8 +1195,6 @@
orr r1, r1, #( 0xf << 20 ) /* Enable CP10 and CP11 coprocessors, then save back. */
str r1, [ r0 ]
bx r14
- nop
- nop
/* *INDENT-ON* */
}
/*-----------------------------------------------------------*/
@@ -901,7 +1341,7 @@
xMPUSettings->xRegion[ 0 ].ulRegionBaseAddress =
( ( uint32_t ) __SRAM_segment_start__ ) | /* Base address. */
( portMPU_REGION_VALID ) |
- ( portSTACK_REGION ); /* Region number. */
+ ( portSTACK_REGION ); /* Region number. */
xMPUSettings->xRegion[ 0 ].ulRegionAttribute =
( portMPU_REGION_READ_WRITE ) |
@@ -910,11 +1350,19 @@
( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) |
( portMPU_REGION_ENABLE );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
+
/* Invalidate user configurable regions. */
for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ )
{
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
}
else
@@ -937,6 +1385,12 @@
( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) |
( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) +
+ ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
}
lIndex = 0;
@@ -957,12 +1411,30 @@
( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) |
( xRegions[ lIndex ].ulParameters ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL );
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
+
+ if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) ||
+ ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
}
else
{
/* Invalidate the region. */
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
lIndex++;
@@ -971,6 +1443,47 @@
}
/*-----------------------------------------------------------*/
+BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+{
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+}
+/*-----------------------------------------------------------*/
+
__asm uint32_t prvPortGetIPSR( void )
{
/* *INDENT-OFF* */
@@ -1041,3 +1554,99 @@
}
#endif /* configASSERT_DEFINED */
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
+
+ void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
+ int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ xMPU_SETTINGS * xTaskMpuSettings;
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
+
+ xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
+ }
+
+#endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings;
+
+ if( xSchedulerRunning == pdFALSE )
+ {
+ /* Grant access to all the kernel objects before the scheduler
+ * is started. It is necessary because there is no task running
+ * yet and therefore, we cannot use the permissions of any
+ * task. */
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
+ ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+
+ #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+ BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
+ {
+ ( void ) lInternalIndexOfKernelObject;
+
+ /* If Access Control List feature is not used, all the tasks have
+ * access to all the kernel objects. */
+ return pdTRUE;
+ }
+
+ #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
diff --git a/Source/portable/RVDS/ARM_CM4_MPU/portmacro.h b/Source/portable/RVDS/ARM_CM4_MPU/portmacro.h
index c9c942e..0f30043 100644
--- a/Source/portable/RVDS/ARM_CM4_MPU/portmacro.h
+++ b/Source/portable/RVDS/ARM_CM4_MPU/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -59,16 +59,18 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
-#if ( configUSE_16_BIT_TICKS == 1 )
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
-#else
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+#else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
@@ -191,9 +193,51 @@
uint32_t ulRegionAttribute;
} xMPU_REGION_REGISTERS;
+typedef struct MPU_REGION_SETTINGS
+{
+ uint32_t ulRegionStartAddress;
+ uint32_t ulRegionEndAddress;
+ uint32_t ulRegionPermissions;
+} xMPU_REGION_SETTINGS;
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#define MAX_CONTEXT_SIZE ( 52 )
+
+/* Size of an Access Control List (ACL) entry in bits. */
+#define portACL_ENTRY_SIZE_BITS ( 32U )
+
+/* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+#define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+#define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
typedef struct MPU_SETTINGS
{
xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
+ uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ];
+ #endif
+ #endif
} xMPU_SETTINGS;
/* Architecture specifics. */
@@ -207,9 +251,10 @@
/*-----------------------------------------------------------*/
/* SVC numbers for various services. */
-#define portSVC_START_SCHEDULER 0
-#define portSVC_YIELD 1
-#define portSVC_RAISE_PRIVILEGE 2
+#define portSVC_START_SCHEDULER 100
+#define portSVC_YIELD 101
+#define portSVC_RAISE_PRIVILEGE 102
+#define portSVC_SYSTEM_CALL_EXIT 103
/* Scheduler utilities. */
@@ -312,6 +357,16 @@
#define portRESET_PRIVILEGE() vResetPrivilege()
/*-----------------------------------------------------------*/
+extern BaseType_t xPortIsTaskPrivileged( void );
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+#define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+/*-----------------------------------------------------------*/
+
static portFORCE_INLINE void vPortSetBASEPRI( uint32_t ulBASEPRI )
{
__asm
diff --git a/Source/portable/RVDS/ARM_CM7/ReadMe.txt b/Source/portable/RVDS/ARM_CM7/ReadMe.txt
index 0a2e7fd..d8e94ac 100644
--- a/Source/portable/RVDS/ARM_CM7/ReadMe.txt
+++ b/Source/portable/RVDS/ARM_CM7/ReadMe.txt
@@ -1,8 +1,8 @@
There are two options for running FreeRTOS on ARM Cortex-M7 microcontrollers.
The best option depends on the revision of the ARM Cortex-M7 core in use. The
revision is specified by an 'r' number, and a 'p' number, so will look something
-like 'r0p1'. Check the documentation for the microcontroller in use to find the
-revision of the Cortex-M7 core used in that microcontroller. If in doubt, use
+like 'r0p1'. Check the documentation for the microcontroller in use to find the
+revision of the Cortex-M7 core used in that microcontroller. If in doubt, use
the FreeRTOS port provided specifically for r0p1 revisions, as that can be used
with all core revisions.
@@ -10,9 +10,9 @@
use the Cortex-M7 r0p1 port - the latter containing a minor errata workaround.
If the revision of the ARM Cortex-M7 core is not r0p1 then either option can be
-used, but it is recommended to use the FreeRTOS ARM Cortex-M4F port located in
+used, but it is recommended to use the FreeRTOS ARM Cortex-M4F port located in
the /FreeRTOS/Source/portable/RVDS/ARM_CM4F directory.
If the revision of the ARM Cortex-M7 core is r0p1 then use the FreeRTOS ARM
Cortex-M7 r0p1 port located in the /FreeRTOS/Source/portable/RVDS/ARM_CM7/r0p1
-directory.
\ No newline at end of file
+directory.
diff --git a/Source/portable/RVDS/ARM_CM7/r0p1/port.c b/Source/portable/RVDS/ARM_CM7/r0p1/port.c
index 9796035..2e1bdfc 100644
--- a/Source/portable/RVDS/ARM_CM7/r0p1/port.c
+++ b/Source/portable/RVDS/ARM_CM7/r0p1/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -65,8 +65,9 @@
#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
/* Constants required to check the validity of an interrupt priority. */
#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
@@ -312,7 +313,8 @@
{
#if ( configASSERT_DEFINED == 1 )
{
- volatile uint32_t ulOriginalPriority;
+ volatile uint8_t ucOriginalPriority;
+ volatile uint32_t ulImplementedPrioBits = 0;
volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
volatile uint8_t ucMaxPriorityValue;
@@ -322,7 +324,7 @@
* ensure interrupt entry is as fast and simple as possible.
*
* Save the interrupt priority value that is about to be clobbered. */
- ulOriginalPriority = *pucFirstUserPriorityRegister;
+ ucOriginalPriority = *pucFirstUserPriorityRegister;
/* Determine the number of priority bits available. First write to all
* possible bits. */
@@ -331,40 +333,56 @@
/* Read the value back to see how many bits stuck. */
ucMaxPriorityValue = *pucFirstUserPriorityRegister;
- /* The kernel interrupt priority should be set to the lowest
- * priority. */
- configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) );
-
/* Use the same mask on the maximum system call priority. */
ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
+ /* Check that the maximum system call priority is nonzero after
+ * accounting for the number of priority bits supported by the
+ * hardware. A priority of 0 is invalid because setting the BASEPRI
+ * register to 0 unmasks all interrupts, and interrupts with priority 0
+ * cannot be masked using BASEPRI.
+ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
+ configASSERT( ucMaxSysCallPriority );
+
+ /* Check that the bits not implemented in hardware are zero in
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
+
/* Calculate the maximum acceptable priority group value for the number
* of bits read back. */
- ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
- ulMaxPRIGROUPValue--;
+ ulImplementedPrioBits++;
ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
- #ifdef __NVIC_PRIO_BITS
+ if( ulImplementedPrioBits == 8 )
{
- /* Check the CMSIS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
+ /* When the hardware implements 8 priority bits, there is no way for
+ * the software to configure PRIGROUP to not have sub-priorities. As
+ * a result, the least significant bit is always used for sub-priority
+ * and there are 128 preemption priorities and 2 sub-priorities.
+ *
+ * This may cause some confusion in some cases - for example, if
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
+ * priority interrupts will be masked in Critical Sections as those
+ * are at the same preemption priority. This may appear confusing as
+ * 4 is higher (numerically lower) priority than
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
+ * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * to 4, this confusion does not happen and the behaviour remains the same.
+ *
+ * The following assert ensures that the sub-priority bit in the
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
+ * confusion. */
+ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
+ ulMaxPRIGROUPValue = 0;
}
- #endif
-
- #ifdef configPRIO_BITS
+ else
{
- /* Check the FreeRTOS configuration that defines the number of
- * priority bits matches the number of priority bits actually queried
- * from the hardware. */
- configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
+ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
}
- #endif
/* Shift the priority group value back to its position within the AIRCR
* register. */
@@ -373,7 +391,7 @@
/* Restore the clobbered interrupt priority register to its original
* value. */
- *pucFirstUserPriorityRegister = ulOriginalPriority;
+ *pucFirstUserPriorityRegister = ucOriginalPriority;
}
#endif /* configASSERT_DEFINED */
diff --git a/Source/portable/RVDS/ARM_CM7/r0p1/portmacro.h b/Source/portable/RVDS/ARM_CM7/r0p1/portmacro.h
index b1d9a98..19301dd 100644
--- a/Source/portable/RVDS/ARM_CM7/r0p1/portmacro.h
+++ b/Source/portable/RVDS/ARM_CM7/r0p1/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -59,16 +59,18 @@
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
+ #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
- #else
+ #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+ #else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
diff --git a/Source/portable/Tasking/ARM_CM4F/port.c b/Source/portable/Tasking/ARM_CM4F/port.c
index 7e2dade..b5967d3 100644
--- a/Source/portable/Tasking/ARM_CM4F/port.c
+++ b/Source/portable/Tasking/ARM_CM4F/port.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -41,8 +41,9 @@
#define portNVIC_SYSTICK_CLK 0x00000004
#define portNVIC_SYSTICK_INT 0x00000002
#define portNVIC_SYSTICK_ENABLE 0x00000001
-#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16 )
-#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24 )
+#define portMIN_INTERRUPT_PRIORITY ( 255UL )
+#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL )
+#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL )
/* Masks off all bits but the VECTACTIVE bits in the ICSR register. */
#define portVECTACTIVE_MASK ( 0xFFUL )
@@ -70,7 +71,7 @@
/* The priority used by the kernel is assigned to a variable to make access
* from inline assembler easier. */
-const uint32_t ulKernelPriority = configKERNEL_INTERRUPT_PRIORITY;
+const uint32_t ulKernelPriority = portMIN_INTERRUPT_PRIORITY;
/* Each task maintains its own interrupt status in the critical nesting
* variable. */
@@ -265,5 +266,4 @@
/* Configure SysTick to interrupt at the requested rate. */
*( portNVIC_SYSTICK_LOAD ) = ( configCPU_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
*( portNVIC_SYSTICK_CTRL ) = portNVIC_SYSTICK_CLK | portNVIC_SYSTICK_INT | portNVIC_SYSTICK_ENABLE;
-}
-/*-----------------------------------------------------------*/
+}
\ No newline at end of file
diff --git a/Source/portable/Tasking/ARM_CM4F/port_asm.asm b/Source/portable/Tasking/ARM_CM4F/port_asm.asm
index b0f436c..f47139e 100644
--- a/Source/portable/Tasking/ARM_CM4F/port_asm.asm
+++ b/Source/portable/Tasking/ARM_CM4F/port_asm.asm
@@ -1,5 +1,5 @@
;/*
-; * FreeRTOS Kernel V10.5.1
+; * FreeRTOS Kernel V10.6.2
; * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
; *
; * SPDX-License-Identifier: MIT
@@ -27,211 +27,210 @@
; */
- .extern pxCurrentTCB
- .extern vTaskSwitchContext
- .extern ulMaxSyscallInterruptPriorityConst
+ .extern pxCurrentTCB
+ .extern vTaskSwitchContext
+ .extern ulMaxSyscallInterruptPriorityConst
- .global _vector_14
- .global _lc_ref__vector_pp_14
- .global SVC_Handler
- .global vPortStartFirstTask
- .global vPortEnableVFP
- .global ulPortSetInterruptMask
- .global vPortClearInterruptMask
+ .global _vector_14
+ .global _lc_ref__vector_pp_14
+ .global SVC_Handler
+ .global vPortStartFirstTask
+ .global vPortEnableVFP
+ .global ulPortSetInterruptMask
+ .global vPortClearInterruptMask
;-----------------------------------------------------------
- .section .text
- .thumb
- .align 4
+ .section .text
+ .thumb
+ .align 4
_vector_14: .type func
- mrs r0, psp
- isb
+ mrs r0, psp
+ isb
- ;Get the location of the current TCB.
- ldr.w r3, =pxCurrentTCB
- ldr r2, [r3]
+ ;Get the location of the current TCB.
+ ldr.w r3, =pxCurrentTCB
+ ldr r2, [r3]
- ;Is the task using the FPU context? If so, push high vfp registers.
- tst r14, #0x10
- it eq
- vstmdbeq r0!, {s16-s31}
+ ;Is the task using the FPU context? If so, push high vfp registers.
+ tst r14, #0x10
+ it eq
+ vstmdbeq r0!, {s16-s31}
- ;Save the core registers.
- stmdb r0!, {r4-r11, r14}
+ ;Save the core registers.
+ stmdb r0!, {r4-r11, r14}
- ;Save the new top of stack into the first member of the TCB.
- str r0, [r2]
+ ;Save the new top of stack into the first member of the TCB.
+ str r0, [r2]
- stmdb sp!, {r0, r3}
- ldr.w r0, =ulMaxSyscallInterruptPriorityConst
- ldr r0, [r0]
- msr basepri, r0
- bl vTaskSwitchContext
- mov r0, #0
- msr basepri, r0
- ldmia sp!, {r0, r3}
+ stmdb sp!, {r0, r3}
+ ldr.w r0, =ulMaxSyscallInterruptPriorityConst
+ ldr r0, [r0]
+ msr basepri, r0
+ bl vTaskSwitchContext
+ mov r0, #0
+ msr basepri, r0
+ ldmia sp!, {r0, r3}
- ;The first item in pxCurrentTCB is the task top of stack.
- ldr r1, [r3]
- ldr r0, [r1]
+ ;The first item in pxCurrentTCB is the task top of stack.
+ ldr r1, [r3]
+ ldr r0, [r1]
- ;Pop the core registers.
- ldmia r0!, {r4-r11, r14}
+ ;Pop the core registers.
+ ldmia r0!, {r4-r11, r14}
- ;Is the task using the FPU context? If so, pop the high vfp registers too.
- tst r14, #0x10
- it eq
- vldmiaeq r0!, {s16-s31}
+ ;Is the task using the FPU context? If so, pop the high vfp registers too.
+ tst r14, #0x10
+ it eq
+ vldmiaeq r0!, {s16-s31}
- msr psp, r0
- isb
- bx r14
+ msr psp, r0
+ isb
+ bx r14
- .size _vector_14, $-_vector_14
- .endsec
+ .size _vector_14, $-_vector_14
+ .endsec
;-----------------------------------------------------------
; This function is an XMC4000 silicon errata workaround. It will get used when
; the SILICON_BUG_PMC_CM_001 linker macro is defined.
- .section .text
- .thumb
- .align 4
+ .section .text
+ .thumb
+ .align 4
_lc_ref__vector_pp_14: .type func
- mrs r0, psp
- isb
+ mrs r0, psp
+ isb
- ;Get the location of the current TCB.
- ldr.w r3, =pxCurrentTCB
- ldr r2, [r3]
+ ;Get the location of the current TCB.
+ ldr.w r3, =pxCurrentTCB
+ ldr r2, [r3]
- ;Is the task using the FPU context? If so, push high vfp registers.
- tst r14, #0x10
- it eq
- vstmdbeq r0!, {s16-s31}
+ ;Is the task using the FPU context? If so, push high vfp registers.
+ tst r14, #0x10
+ it eq
+ vstmdbeq r0!, {s16-s31}
- ;Save the core registers.
- stmdb r0!, {r4-r11, r14}
+ ;Save the core registers.
+ stmdb r0!, {r4-r11, r14}
- ;Save the new top of stack into the first member of the TCB.
- str r0, [r2]
+ ;Save the new top of stack into the first member of the TCB.
+ str r0, [r2]
- stmdb sp!, {r3}
- ldr.w r0, =ulMaxSyscallInterruptPriorityConst
- ldr r0, [r0]
- msr basepri, r0
- bl vTaskSwitchContext
- mov r0, #0
- msr basepri, r0
- ldmia sp!, {r3}
+ stmdb sp!, {r3}
+ ldr.w r0, =ulMaxSyscallInterruptPriorityConst
+ ldr r0, [r0]
+ msr basepri, r0
+ bl vTaskSwitchContext
+ mov r0, #0
+ msr basepri, r0
+ ldmia sp!, {r3}
- ;The first item in pxCurrentTCB is the task top of stack.
- ldr r1, [r3]
- ldr r0, [r1]
+ ;The first item in pxCurrentTCB is the task top of stack.
+ ldr r1, [r3]
+ ldr r0, [r1]
- ;Pop the core registers.
- ldmia r0!, {r4-r11, r14}
+ ;Pop the core registers.
+ ldmia r0!, {r4-r11, r14}
- ;Is the task using the FPU context? If so, pop the high vfp registers too.
- tst r14, #0x10
- it eq
- vldmiaeq r0!, {s16-s31}
+ ;Is the task using the FPU context? If so, pop the high vfp registers too.
+ tst r14, #0x10
+ it eq
+ vldmiaeq r0!, {s16-s31}
- msr psp, r0
- isb
- push { lr }
- pop { pc } ; XMC4000 specific errata workaround. Do not used "bx lr" here.
+ msr psp, r0
+ isb
+ push { lr }
+ pop { pc } ; XMC4000 specific errata workaround. Do not use "bx lr" here.
- .size _lc_ref__vector_pp_14, $-_lc_ref__vector_pp_14
- .endsec
+ .size _lc_ref__vector_pp_14, $-_lc_ref__vector_pp_14
+ .endsec
;-----------------------------------------------------------
- .section .text
- .thumb
- .align 4
+ .section .text
+ .thumb
+ .align 4
SVC_Handler: .type func
- ;Get the location of the current TCB.
- ldr.w r3, =pxCurrentTCB
- ldr r1, [r3]
- ldr r0, [r1]
- ;Pop the core registers.
- ldmia r0!, {r4-r11, r14}
- msr psp, r0
- isb
- mov r0, #0
- msr basepri, r0
- bx r14
- .size SVC_Handler, $-SVC_Handler
- .endsec
+ ;Get the location of the current TCB.
+ ldr.w r3, =pxCurrentTCB
+ ldr r1, [r3]
+ ldr r0, [r1]
+ ;Pop the core registers.
+ ldmia r0!, {r4-r11, r14}
+ msr psp, r0
+ isb
+ mov r0, #0
+ msr basepri, r0
+ bx r14
+ .size SVC_Handler, $-SVC_Handler
+ .endsec
;-----------------------------------------------------------
- .section .text
- .thumb
- .align 4
+ .section .text
+ .thumb
+ .align 4
vPortStartFirstTask .type func
- ;Use the NVIC offset register to locate the stack.
- ldr.w r0, =0xE000ED08
- ldr r0, [r0]
- ldr r0, [r0]
- ;Set the msp back to the start of the stack.
- msr msp, r0
- ;Call SVC to start the first task.
- cpsie i
- cpsie f
- dsb
- isb
- svc 0
- .size vPortStartFirstTask, $-vPortStartFirstTask
- .endsec
+ ;Use the NVIC offset register to locate the stack.
+ ldr.w r0, =0xE000ED08
+ ldr r0, [r0]
+ ldr r0, [r0]
+ ;Set the msp back to the start of the stack.
+ msr msp, r0
+ ;Call SVC to start the first task.
+ cpsie i
+ cpsie f
+ dsb
+ isb
+ svc 0
+ .size vPortStartFirstTask, $-vPortStartFirstTask
+ .endsec
;-----------------------------------------------------------
- .section .text
- .thumb
- .align 4
+ .section .text
+ .thumb
+ .align 4
vPortEnableVFP .type func
- ;The FPU enable bits are in the CPACR.
- ldr.w r0, =0xE000ED88
- ldr r1, [r0]
+ ;The FPU enable bits are in the CPACR.
+ ldr.w r0, =0xE000ED88
+ ldr r1, [r0]
- ;Enable CP10 and CP11 coprocessors, then save back.
- orr r1, r1, #( 0xf << 20 )
- str r1, [r0]
- bx r14
- .size vPortEnableVFP, $-vPortEnableVFP
- .endsec
+ ;Enable CP10 and CP11 coprocessors, then save back.
+ orr r1, r1, #( 0xf << 20 )
+ str r1, [r0]
+ bx r14
+ .size vPortEnableVFP, $-vPortEnableVFP
+ .endsec
;-----------------------------------------------------------
- .section .text
- .thumb
- .align 4
+ .section .text
+ .thumb
+ .align 4
ulPortSetInterruptMask:
- mrs r0, basepri
- ldr.w r1, =ulMaxSyscallInterruptPriorityConst
- ldr r1, [r1]
- msr basepri, r1
- bx r14
- .size ulPortSetInterruptMask, $-ulPortSetInterruptMask
- .endsec
+ mrs r0, basepri
+ ldr.w r1, =ulMaxSyscallInterruptPriorityConst
+ ldr r1, [r1]
+ msr basepri, r1
+ bx r14
+ .size ulPortSetInterruptMask, $-ulPortSetInterruptMask
+ .endsec
;-----------------------------------------------------------
- .section .text
- .thumb
- .align 4
+ .section .text
+ .thumb
+ .align 4
vPortClearInterruptMask:
- msr basepri, r0
- bx r14
- .size vPortClearInterruptMask, $-vPortClearInterruptMask
- .endsec
+ msr basepri, r0
+ bx r14
+ .size vPortClearInterruptMask, $-vPortClearInterruptMask
+ .endsec
;-----------------------------------------------------------
- .end
-
+ .end
diff --git a/Source/portable/Tasking/ARM_CM4F/portmacro.h b/Source/portable/Tasking/ARM_CM4F/portmacro.h
index edc5b0e..1527091 100644
--- a/Source/portable/Tasking/ARM_CM4F/portmacro.h
+++ b/Source/portable/Tasking/ARM_CM4F/portmacro.h
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -30,9 +30,11 @@
#ifndef PORTMACRO_H
#define PORTMACRO_H
- #ifdef __cplusplus
- extern "C" {
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+/* *INDENT-ON* */
/*-----------------------------------------------------------
* Port specific definitions.
@@ -58,16 +60,18 @@
typedef unsigned long UBaseType_t;
- #if ( configUSE_16_BIT_TICKS == 1 )
+ #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
- #else
+ #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
+ #else
+ #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
@@ -126,8 +130,10 @@
#define portNOP()
- #ifdef __cplusplus
- }
- #endif
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+ }
+#endif
+/* *INDENT-ON* */
#endif /* PORTMACRO_H */
diff --git a/Source/portable/readme.txt b/Source/portable/readme.txt
index 89f6b09..ca8f71e 100644
--- a/Source/portable/readme.txt
+++ b/Source/portable/readme.txt
@@ -17,4 +17,3 @@
FreeRTOS/Source/Portable/[compiler]/[architecture] directory. If this is the
only port you are interested in then all the other directories can be
ignored.
-
diff --git a/Source/queue.c b/Source/queue.c
index e76e7ff..40edbe4 100644
--- a/Source/queue.c
+++ b/Source/queue.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -68,14 +68,14 @@
typedef struct QueuePointers
{
- int8_t * pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
- int8_t * pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
+ int8_t * pcTail; /**< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
+ int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */
} QueuePointers_t;
typedef struct SemaphoreData
{
- TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */
- UBaseType_t uxRecursiveCallCount; /*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
+ TaskHandle_t xMutexHolder; /**< The handle of the task that holds the mutex. */
+ UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t;
/* Semaphores do not actually store or copy data, so have an item size of
@@ -99,27 +99,27 @@
*/
typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
- int8_t * pcHead; /*< Points to the beginning of the queue storage area. */
- int8_t * pcWriteTo; /*< Points to the free next place in the storage area. */
+ int8_t * pcHead; /**< Points to the beginning of the queue storage area. */
+ int8_t * pcWriteTo; /**< Points to the free next place in the storage area. */
union
{
- QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */
- SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
+ QueuePointers_t xQueue; /**< Data required exclusively when this structure is used as a queue. */
+ SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */
} u;
- List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
- List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
+ List_t xTasksWaitingToSend; /**< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
+ List_t xTasksWaitingToReceive; /**< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
- volatile UBaseType_t uxMessagesWaiting; /*< The number of items currently in the queue. */
- UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
- UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */
+ volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */
+ UBaseType_t uxLength; /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */
+ UBaseType_t uxItemSize; /**< The size of each items that the queue will hold. */
- volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
- volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
+ volatile int8_t cRxLock; /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
+ volatile int8_t cTxLock; /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
- uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
+ uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
#endif
#if ( configUSE_QUEUE_SETS == 1 )
@@ -268,14 +268,14 @@
* tasks than the number of tasks in the system.
*/
#define prvIncrementQueueTxLock( pxQueue, cTxLock ) \
- { \
+ do { \
const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \
if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks ) \
{ \
configASSERT( ( cTxLock ) != queueINT8_MAX ); \
( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \
} \
- }
+ } while( 0 )
/*
* Macro to increment cRxLock member of the queue data structure. It is
@@ -283,14 +283,14 @@
* tasks than the number of tasks in the system.
*/
#define prvIncrementQueueRxLock( pxQueue, cRxLock ) \
- { \
+ do { \
const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \
if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks ) \
{ \
configASSERT( ( cRxLock ) != queueINT8_MAX ); \
( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
} \
- }
+ } while( 0 )
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
@@ -423,6 +423,55 @@
#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
+#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+ BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
+ uint8_t ** ppucQueueStorage,
+ StaticQueue_t ** ppxStaticQueue )
+ {
+ BaseType_t xReturn;
+ Queue_t * const pxQueue = xQueue;
+
+ configASSERT( pxQueue );
+ configASSERT( ppxStaticQueue );
+
+ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+ {
+ /* Check if the queue was statically allocated. */
+ if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdTRUE )
+ {
+ if( ppucQueueStorage != NULL )
+ {
+ *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
+ }
+
+ *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
+ xReturn = pdTRUE;
+ }
+ else
+ {
+ xReturn = pdFALSE;
+ }
+ }
+ #else /* configSUPPORT_DYNAMIC_ALLOCATION */
+ {
+ /* Queue must have been statically allocated. */
+ if( ppucQueueStorage != NULL )
+ {
+ *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
+ }
+
+ *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
+ xReturn = pdTRUE;
+ }
+ #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+
+ return xReturn;
+ }
+
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
@@ -2145,6 +2194,18 @@
#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
+UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+{
+ return ( ( Queue_t * ) xQueue )->uxItemSize;
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t uxQueueGetQueueLength( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+{
+ return ( ( Queue_t * ) xQueue )->uxLength;
+}
+/*-----------------------------------------------------------*/
+
#if ( configUSE_MUTEXES == 1 )
static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
diff --git a/Source/st_readme.txt b/Source/st_readme.txt
index 0736166..06fdf50 100644
--- a/Source/st_readme.txt
+++ b/Source/st_readme.txt
@@ -31,6 +31,143 @@
@endverbatim
=======
+### 11-October-2024 ###
+=========================
+ + FreeRTOS: Update to FreeRTOS v10.6.2
+
+ + Add files to include folder
+ - include/mpu_syscall_numbers.h
+ - include/newlib-freertos.h
+ - include/picolibc-freertos.h
+
+ + Add file to portable/Common folder
+ - portable/Common/mpu_wrappers_v2.c
+
+ + Add files to portable folder
+ - portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c
+ - portable/GCC-RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c
+ - portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c
+ - portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c
+ - portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c
+ - portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c
+ - portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c
+ - portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c
+ - portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c
+ - portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c
+ - portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c
+ - portable/IAR/ARM_CM4_MPU/mpu_wrappers_v2_asm.S
+ - portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S
+ - portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S
+ - portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S
+ - portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S
+ - portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S
+ - portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S
+ - portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S
+ - portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S
+
+ + Add support for 16 MPU regions to Cortex-M33, M55 and M85 ports
+ - Source/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h
+ - Source/portable/IAR/ARM_CM33/non_secure/portmacro.h
+ - Source/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h
+ - Source/portable/IAR/ARM_CM55/non_secure/portmacro.h
+ - Source/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h
+ - Source/portable/IAR/ARM_CM85/non_secure/portmacro.h
+
+ + Add missing MPU API prototypes
+ - Source/include/mpu_prototypes.h
+ - Source/include/mpu_wrappers.h
+
+ + CMSIS_RTOS_V2: update API to be compatible with Cortex A
+ - CMSIS_RTOS_V2/cmsis_os2.c
+
+### 03-May-2024 ###
+=========================
+ + Restore ARM_CA9, ARM_CM55 and ARM_CM85 port files
+
+ - Source/portable/GCC/ARM_CA9/port.c
+ - Source/portable/GCC/ARM_CA9/portASM.S
+ - Source/portable/GCC/ARM_CA9/portmacro.h
+ - Source/portable/GCC/ARM_CM55/non_secure/port.c
+ - Source/portable/GCC/ARM_CM55/non_secure/portasm.c
+ - Source/portable/GCC/ARM_CM55/non_secure/portasm.h
+ - Source/portable/GCC/ARM_CM55/non_secure/portmacro.h
+ - Source/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h
+ - Source/portable/GCC/ARM_CM55/secure/secure_context.c
+ - Source/portable/GCC/ARM_CM55/secure/secure_context.h
+ - Source/portable/GCC/ARM_CM55/secure/secure_context_port.c
+ - Source/portable/GCC/ARM_CM55/secure/secure_heap.c
+ - Source/portable/GCC/ARM_CM55/secure/secure_heap.h
+ - Source/portable/GCC/ARM_CM55/secure/secure_init.c
+ - Source/portable/GCC/ARM_CM55/secure/secure_init.h
+ - Source/portable/GCC/ARM_CM55/secure/secure_port_macros.h
+ - Source/portable/GCC/ARM_CM55_NTZ/non_secure/port.c
+ - Source/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c
+ - Source/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.h
+ - Source/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h
+ - Source/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h
+ - Source/portable/GCC/ARM_CM85/non_secure/port.c
+ - Source/portable/GCC/ARM_CM85/non_secure/portasm.c
+ - Source/portable/GCC/ARM_CM85/non_secure/portasm.h
+ - Source/portable/GCC/ARM_CM85/non_secure/portmacro.h
+ - Source/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h
+ - Source/portable/GCC/ARM_CM85/secure/secure_context.c
+ - Source/portable/GCC/ARM_CM85/secure/secure_context.h
+ - Source/portable/GCC/ARM_CM85/secure/secure_context_port.c
+ - Source/portable/GCC/ARM_CM85/secure/secure_heap.c
+ - Source/portable/GCC/ARM_CM85/secure/secure_heap.h
+ - Source/portable/GCC/ARM_CM85/secure/secure_init.c
+ - Source/portable/GCC/ARM_CM85/secure/secure_init.h
+ - Source/portable/GCC/ARM_CM85/secure/secure_port_macros.h
+ - Source/portable/GCC/ARM_CM85_NTZ/non_secure/port.c
+ - Source/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c
+ - Source/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.h
+ - Source/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h
+ - Source/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h
+ - Source/portable/IAR/ARM_CA9/port.c
+ - Source/portable/IAR/ARM_CA9/portASM.h
+ - Source/portable/IAR/ARM_CA9/portASM.s
+ - Source/portable/IAR/ARM_CA9/portmacro.h
+ - Source/portable/IAR/ARM_CM55/non_secure/port.c
+ - Source/portable/IAR/ARM_CM55/non_secure/portasm.h
+ - Source/portable/IAR/ARM_CM55/non_secure/portasm.s
+ - Source/portable/IAR/ARM_CM55/non_secure/portmacro.h
+ - Source/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h
+ - Source/portable/IAR/ARM_CM55/secure/secure_context.c
+ - Source/portable/IAR/ARM_CM55/secure/secure_context.h
+ - Source/portable/IAR/ARM_CM55/secure/secure_context_port_asm.s
+ - Source/portable/IAR/ARM_CM55/secure/secure_heap.c
+ - Source/portable/IAR/ARM_CM55/secure/secure_heap.h
+ - Source/portable/IAR/ARM_CM55/secure/secure_init.c
+ - Source/portable/IAR/ARM_CM55/secure/secure_init.h
+ - Source/portable/IAR/ARM_CM55/secure/secure_port_macros.h
+ - Source/portable/IAR/ARM_CM55_NTZ/non_secure/port.c
+ - Source/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.h
+ - Source/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s
+ - Source/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h
+ - Source/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h
+ - Source/portable/IAR/ARM_CM85/non_secure/port.c
+ - Source/portable/IAR/ARM_CM85/non_secure/portasm.h
+ - Source/portable/IAR/ARM_CM85/non_secure/portasm.s
+ - Source/portable/IAR/ARM_CM85/non_secure/portmacro.h
+ - Source/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h
+ - Source/portable/IAR/ARM_CM85/secure/secure_context.c
+ - Source/portable/IAR/ARM_CM85/secure/secure_context.h
+ - Source/portable/IAR/ARM_CM85/secure/secure_context_port_asm.s
+ - Source/portable/IAR/ARM_CM85/secure/secure_heap.c
+ - Source/portable/IAR/ARM_CM85/secure/secure_heap.h
+ - Source/portable/IAR/ARM_CM85/secure/secure_init.c
+ - Source/portable/IAR/ARM_CM85/secure/secure_init.h
+ - Source/portable/IAR/ARM_CM85/secure/secure_port_macros.h
+ - Source/portable/IAR/ARM_CM85_NTZ/non_secure/port.c
+ - Source/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.h
+ - Source/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s
+ - Source/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h
+ - Source/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h
+ -
+
+### 17-November-2023 ###
+=========================
+ + CMSIS_RTOS_V2: Restore cmsis_os.h file for backward compatibility
### 18-August-2023 ###
=========================
@@ -420,7 +557,7 @@
example : osMutex1Id = osRecursiveMutexCreate (osMutex(Mutex1));
- Fix implementation of functions osSemaphoreWait(), osMutexRelease() and osMutexWait() by using the appropriate
- freeRTOS FromISR APIs when called from an interrupt.
+ freeRTOS FromISR APIs when called from an interrupt.
- Fix compilation warning when the constant INCLUDE_eTaskGetState is not defined
diff --git a/Source/stream_buffer.c b/Source/stream_buffer.c
index fed7eb8..53208ac 100644
--- a/Source/stream_buffer.c
+++ b/Source/stream_buffer.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -70,7 +70,7 @@
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
} \
} \
- ( void ) xTaskResumeAll();
+ ( void ) xTaskResumeAll()
#endif /* sbRECEIVE_COMPLETED */
/* If user has provided a per-instance receive complete callback, then
@@ -78,7 +78,7 @@
*/
#if ( configUSE_SB_COMPLETED_CALLBACK == 1 )
#define prvRECEIVE_COMPLETED( pxStreamBuffer ) \
- { \
+ do { \
if( ( pxStreamBuffer )->pxReceiveCompletedCallback != NULL ) \
{ \
( pxStreamBuffer )->pxReceiveCompletedCallback( ( pxStreamBuffer ), pdFALSE, NULL ); \
@@ -87,7 +87,7 @@
{ \
sbRECEIVE_COMPLETED( ( pxStreamBuffer ) ); \
} \
- }
+ } while( 0 )
#else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */
#define prvRECEIVE_COMPLETED( pxStreamBuffer ) sbRECEIVE_COMPLETED( ( pxStreamBuffer ) )
#endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */
@@ -95,10 +95,10 @@
#ifndef sbRECEIVE_COMPLETED_FROM_ISR
#define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \
pxHigherPriorityTaskWoken ) \
- { \
+ do { \
UBaseType_t uxSavedInterruptStatus; \
\
- uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \
+ uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
{ \
@@ -110,13 +110,13 @@
} \
} \
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \
- }
+ } while( 0 )
#endif /* sbRECEIVE_COMPLETED_FROM_ISR */
#if ( configUSE_SB_COMPLETED_CALLBACK == 1 )
#define prvRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \
pxHigherPriorityTaskWoken ) \
- { \
+ do { \
if( ( pxStreamBuffer )->pxReceiveCompletedCallback != NULL ) \
{ \
( pxStreamBuffer )->pxReceiveCompletedCallback( ( pxStreamBuffer ), pdTRUE, ( pxHigherPriorityTaskWoken ) ); \
@@ -125,7 +125,7 @@
{ \
sbRECEIVE_COMPLETED_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ); \
} \
- }
+ } while( 0 )
#else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */
#define prvRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \
sbRECEIVE_COMPLETED_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) )
@@ -147,7 +147,7 @@
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
} \
} \
- ( void ) xTaskResumeAll();
+ ( void ) xTaskResumeAll()
#endif /* sbSEND_COMPLETED */
/* If user has provided a per-instance send completed callback, then
@@ -155,7 +155,7 @@
*/
#if ( configUSE_SB_COMPLETED_CALLBACK == 1 )
#define prvSEND_COMPLETED( pxStreamBuffer ) \
- { \
+ do { \
if( ( pxStreamBuffer )->pxSendCompletedCallback != NULL ) \
{ \
pxStreamBuffer->pxSendCompletedCallback( ( pxStreamBuffer ), pdFALSE, NULL ); \
@@ -164,7 +164,7 @@
{ \
sbSEND_COMPLETED( ( pxStreamBuffer ) ); \
} \
- }
+ } while( 0 )
#else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */
#define prvSEND_COMPLETED( pxStreamBuffer ) sbSEND_COMPLETED( ( pxStreamBuffer ) )
#endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */
@@ -172,10 +172,10 @@
#ifndef sbSEND_COMPLETE_FROM_ISR
#define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \
- { \
+ do { \
UBaseType_t uxSavedInterruptStatus; \
\
- uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \
+ uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
{ \
@@ -187,13 +187,13 @@
} \
} \
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \
- }
+ } while( 0 )
#endif /* sbSEND_COMPLETE_FROM_ISR */
#if ( configUSE_SB_COMPLETED_CALLBACK == 1 )
#define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \
- { \
+ do { \
if( ( pxStreamBuffer )->pxSendCompletedCallback != NULL ) \
{ \
( pxStreamBuffer )->pxSendCompletedCallback( ( pxStreamBuffer ), pdTRUE, ( pxHigherPriorityTaskWoken ) ); \
@@ -202,7 +202,7 @@
{ \
sbSEND_COMPLETE_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ); \
} \
- }
+ } while( 0 )
#else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */
#define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \
sbSEND_COMPLETE_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) )
@@ -324,7 +324,7 @@
StreamBufferCallbackFunction_t pxSendCompletedCallback,
StreamBufferCallbackFunction_t pxReceiveCompletedCallback )
{
- uint8_t * pucAllocatedMemory;
+ void * pvAllocatedMemory;
uint8_t ucFlags;
/* In case the stream buffer is going to be used as a message buffer
@@ -364,31 +364,31 @@
if( xBufferSizeBytes < ( xBufferSizeBytes + 1 + sizeof( StreamBuffer_t ) ) )
{
xBufferSizeBytes++;
- pucAllocatedMemory = ( uint8_t * ) pvPortMalloc( xBufferSizeBytes + sizeof( StreamBuffer_t ) ); /*lint !e9079 malloc() only returns void*. */
+ pvAllocatedMemory = pvPortMalloc( xBufferSizeBytes + sizeof( StreamBuffer_t ) );
}
else
{
- pucAllocatedMemory = NULL;
+ pvAllocatedMemory = NULL;
}
- if( pucAllocatedMemory != NULL )
+ if( pvAllocatedMemory != NULL )
{
- prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */
- pucAllocatedMemory + sizeof( StreamBuffer_t ), /* Storage area follows. */ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. */
+ prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pvAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */
+ ( ( uint8_t * ) pvAllocatedMemory ) + sizeof( StreamBuffer_t ), /* Storage area follows. */ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. */
xBufferSizeBytes,
xTriggerLevelBytes,
ucFlags,
pxSendCompletedCallback,
pxReceiveCompletedCallback );
- traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer );
+ traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pvAllocatedMemory ), xIsMessageBuffer );
}
else
{
traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer );
}
- return ( StreamBufferHandle_t ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */
+ return ( StreamBufferHandle_t ) pvAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
@@ -474,6 +474,34 @@
#endif /* ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
/*-----------------------------------------------------------*/
+#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+ BaseType_t xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffer,
+ uint8_t ** ppucStreamBufferStorageArea,
+ StaticStreamBuffer_t ** ppxStaticStreamBuffer )
+ {
+ BaseType_t xReturn;
+ StreamBuffer_t * const pxStreamBuffer = xStreamBuffer;
+
+ configASSERT( pxStreamBuffer );
+ configASSERT( ppucStreamBufferStorageArea );
+ configASSERT( ppxStaticStreamBuffer );
+
+ if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_STATICALLY_ALLOCATED ) != ( uint8_t ) 0 )
+ {
+ *ppucStreamBufferStorageArea = pxStreamBuffer->pucBuffer;
+ *ppxStaticStreamBuffer = ( StaticStreamBuffer_t * ) pxStreamBuffer;
+ xReturn = pdTRUE;
+ }
+ else
+ {
+ xReturn = pdFALSE;
+ }
+
+ return xReturn;
+ }
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer )
{
StreamBuffer_t * pxStreamBuffer = xStreamBuffer;
@@ -1192,7 +1220,7 @@
configASSERT( pxStreamBuffer );
- uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR();
+ uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL )
{
@@ -1223,7 +1251,7 @@
configASSERT( pxStreamBuffer );
- uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR();
+ uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL )
{
@@ -1372,9 +1400,9 @@
/* The value written just has to be identifiable when looking at the
* memory. Don't use 0xA5 as that is the stack fill value and could
* result in confusion as to what is actually being observed. */
- const BaseType_t xWriteValue = 0x55;
- configASSERT( memset( pucBuffer, ( int ) xWriteValue, xBufferSizeBytes ) == pucBuffer );
- } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */
+ #define STREAM_BUFFER_BUFFER_WRITE_VALUE ( 0x55 )
+ configASSERT( memset( pucBuffer, ( int ) STREAM_BUFFER_BUFFER_WRITE_VALUE, xBufferSizeBytes ) == pucBuffer );
+ }
#endif
( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */
diff --git a/Source/tasks.c b/Source/tasks.c
index 0cc948f..afb7009 100644
--- a/Source/tasks.c
+++ b/Source/tasks.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -124,17 +124,17 @@
/* uxTopReadyPriority holds the priority of the highest priority ready
* state task. */
#define taskRECORD_READY_PRIORITY( uxPriority ) \
- { \
+ do { \
if( ( uxPriority ) > uxTopReadyPriority ) \
{ \
uxTopReadyPriority = ( uxPriority ); \
} \
- } /* taskRECORD_READY_PRIORITY */
+ } while( 0 ) /* taskRECORD_READY_PRIORITY */
/*-----------------------------------------------------------*/
#define taskSELECT_HIGHEST_PRIORITY_TASK() \
- { \
+ do { \
UBaseType_t uxTopPriority = uxTopReadyPriority; \
\
/* Find the highest priority queue that contains ready tasks. */ \
@@ -148,7 +148,7 @@
* the same priority get an equal share of the processor time. */ \
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
uxTopReadyPriority = uxTopPriority; \
- } /* taskSELECT_HIGHEST_PRIORITY_TASK */
+ } while( 0 ) /* taskSELECT_HIGHEST_PRIORITY_TASK */
/*-----------------------------------------------------------*/
@@ -170,14 +170,14 @@
/*-----------------------------------------------------------*/
#define taskSELECT_HIGHEST_PRIORITY_TASK() \
- { \
+ do { \
UBaseType_t uxTopPriority; \
\
/* Find the highest priority list that contains ready tasks. */ \
portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \
configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
- } /* taskSELECT_HIGHEST_PRIORITY_TASK() */
+ } while( 0 )
/*-----------------------------------------------------------*/
@@ -185,12 +185,12 @@
* is being referenced from a ready list. If it is referenced from a delayed
* or suspended list then it won't be in a ready list. */
#define taskRESET_READY_PRIORITY( uxPriority ) \
- { \
+ do { \
if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
{ \
portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
} \
- }
+ } while( 0 )
#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
@@ -199,7 +199,7 @@
/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
* count overflows. */
#define taskSWITCH_DELAYED_LISTS() \
- { \
+ do { \
List_t * pxTemp; \
\
/* The delayed tasks list should be empty when the lists are switched. */ \
@@ -210,7 +210,7 @@
pxOverflowDelayedTaskList = pxTemp; \
xNumOfOverflows++; \
prvResetNextTaskUnblockTime(); \
- }
+ } while( 0 )
/*-----------------------------------------------------------*/
@@ -218,11 +218,13 @@
* Place the task represented by pxTCB into the appropriate ready list for
* the task. It is inserted at the end of the list.
*/
-#define prvAddTaskToReadyList( pxTCB ) \
- traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
- taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
- listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
- tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
+#define prvAddTaskToReadyList( pxTCB ) \
+ do { \
+ traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
+ taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
+ listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
+ tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ); \
+ } while( 0 )
/*-----------------------------------------------------------*/
/*
@@ -241,10 +243,12 @@
* the scheduler that the value should not be changed - in which case it is the
* responsibility of whichever module is using the value to ensure it gets set back
* to its original value when it is released. */
-#if ( configUSE_16_BIT_TICKS == 1 )
+#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
#define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U
-#else
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
#define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL
+#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS )
+ #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000000000000000ULL
#endif
/*
@@ -254,33 +258,33 @@
*/
typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
- volatile StackType_t * pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
+ volatile StackType_t * pxTopOfStack; /**< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
#if ( portUSING_MPU_WRAPPERS == 1 )
- xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
+ xMPU_SETTINGS xMPUSettings; /**< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
#endif
- ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
- ListItem_t xEventListItem; /*< Used to reference a task from an event list. */
- UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */
- StackType_t * pxStack; /*< Points to the start of the stack. */
- char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+ ListItem_t xStateListItem; /**< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
+ ListItem_t xEventListItem; /**< Used to reference a task from an event list. */
+ UBaseType_t uxPriority; /**< The priority of the task. 0 is the lowest priority. */
+ StackType_t * pxStack; /**< Points to the start of the stack. */
+ char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
#if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
- StackType_t * pxEndOfStack; /*< Points to the highest valid address for the stack. */
+ StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */
#endif
#if ( portCRITICAL_NESTING_IN_TCB == 1 )
- UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
+ UBaseType_t uxCriticalNesting; /**< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
#endif
#if ( configUSE_TRACE_FACILITY == 1 )
- UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
- UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
+ UBaseType_t uxTCBNumber; /**< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
+ UBaseType_t uxTaskNumber; /**< Stores a number specifically for use by third party trace code. */
#endif
#if ( configUSE_MUTEXES == 1 )
- UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
+ UBaseType_t uxBasePriority; /**< The priority last assigned to the task - used by the priority inheritance mechanism. */
UBaseType_t uxMutexesHeld;
#endif
@@ -293,11 +297,11 @@
#endif
#if ( configGENERATE_RUN_TIME_STATS == 1 )
- configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
+ configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /**< Stores the amount of time the task has spent in the Running state. */
#endif
- #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
- configTLS_BLOCK_TYPE xTLSBlock; /*< Memory block used as Thread Local Storage (TLS) Block for the task. */
+ #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
+ configTLS_BLOCK_TYPE xTLSBlock; /**< Memory block used as Thread Local Storage (TLS) Block for the task. */
#endif
#if ( configUSE_TASK_NOTIFICATIONS == 1 )
@@ -308,7 +312,7 @@
/* See the comments in FreeRTOS.h with the definition of
* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
#if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
- uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
+ uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
#endif
#if ( INCLUDE_xTaskAbortDelay == 1 )
@@ -332,23 +336,23 @@
* xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
* doing so breaks some kernel aware debuggers and debuggers that rely on removing
* the static qualifier. */
-PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /*< Prioritised ready tasks. */
-PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
-PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
-PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
-PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
-PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
+PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /**< Prioritised ready tasks. */
+PRIVILEGED_DATA static List_t xDelayedTaskList1; /**< Delayed tasks. */
+PRIVILEGED_DATA static List_t xDelayedTaskList2; /**< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
+PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /**< Points to the delayed task list currently being used. */
+PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /**< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
+PRIVILEGED_DATA static List_t xPendingReadyList; /**< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
#if ( INCLUDE_vTaskDelete == 1 )
- PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */
+ PRIVILEGED_DATA static List_t xTasksWaitingTermination; /**< Tasks that have been deleted - but their memory not yet freed. */
PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
#endif
#if ( INCLUDE_vTaskSuspend == 1 )
- PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
+ PRIVILEGED_DATA static List_t xSuspendedTaskList; /**< Tasks that are currently suspended. */
#endif
@@ -368,7 +372,7 @@
PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
-PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
+PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /**< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
/* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists.
* For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority
@@ -383,14 +387,14 @@
* kernel to move the task from the pending ready list into the real ready list
* when the scheduler is unsuspended. The pending ready list itself can only be
* accessed from a critical section. */
-PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE;
+PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) 0U;
#if ( configGENERATE_RUN_TIME_STATS == 1 )
/* Do not move these variables to function scope as doing so prevents the
* code working with debuggers that need to remove the static qualifier. */
- PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */
- PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */
+ PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime = 0UL; /**< Holds the value of a timer/counter the last time a task was switched in. */
+ PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL; /**< Holds the total amount of execution time as defined by the run time counter clock. */
#endif
@@ -951,10 +955,10 @@
}
#endif
- #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
+ #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
{
/* Allocate and initialize memory for the task's TLS Block. */
- configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock );
+ configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock, pxTopOfStack );
}
#endif
@@ -971,17 +975,17 @@
{
#if ( portSTACK_GROWTH < 0 )
{
- pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );
+ pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
}
#else /* portSTACK_GROWTH */
{
- pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );
+ pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
}
#endif /* portSTACK_GROWTH */
}
#else /* portHAS_STACK_OVERFLOW_CHECKING */
{
- pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
+ pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
}
#endif /* portHAS_STACK_OVERFLOW_CHECKING */
}
@@ -1196,7 +1200,7 @@
{
if( pxTCB == pxCurrentTCB )
{
- configASSERT( uxSchedulerSuspended == 0 );
+ configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );
portYIELD_WITHIN_API();
}
else
@@ -1219,7 +1223,7 @@
configASSERT( pxPreviousWakeTime );
configASSERT( ( xTimeIncrement > 0U ) );
- configASSERT( uxSchedulerSuspended == 0 );
+ configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );
vTaskSuspendAll();
{
@@ -1305,7 +1309,7 @@
/* A delay time of zero just forces a reschedule. */
if( xTicksToDelay > ( TickType_t ) 0U )
{
- configASSERT( uxSchedulerSuspended == 0 );
+ configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );
vTaskSuspendAll();
{
traceTASK_DELAY();
@@ -1347,6 +1351,7 @@
{
eTaskState eReturn;
List_t const * pxStateList;
+ List_t const * pxEventList;
List_t const * pxDelayedList;
List_t const * pxOverflowedDelayedList;
const TCB_t * const pxTCB = xTask;
@@ -1363,12 +1368,20 @@
taskENTER_CRITICAL();
{
pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
+ pxEventList = listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) );
pxDelayedList = pxDelayedTaskList;
pxOverflowedDelayedList = pxOverflowDelayedTaskList;
}
taskEXIT_CRITICAL();
- if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
+ if( pxEventList == &xPendingReadyList )
+ {
+ /* The task has been placed on the pending ready list, so its
+ * state is eReady regardless of what list the task's state list
+ * item is currently placed on. */
+ eReturn = eReady;
+ }
+ else if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
{
/* The task being queried is referenced from one of the Blocked
* lists. */
@@ -1467,7 +1480,8 @@
UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
{
TCB_t const * pxTCB;
- UBaseType_t uxReturn, uxSavedInterruptState;
+ UBaseType_t uxReturn;
+ UBaseType_t uxSavedInterruptState;
/* RTOS ports that support interrupt nesting have the concept of a
* maximum system call (or maximum API call) interrupt priority.
@@ -1552,7 +1566,7 @@
/* The priority of a task other than the currently
* running task is being raised. Is the priority being
* raised above that of the running task? */
- if( uxNewPriority >= pxCurrentTCB->uxPriority )
+ if( uxNewPriority > pxCurrentTCB->uxPriority )
{
xYieldRequired = pdTRUE;
}
@@ -1743,7 +1757,7 @@
if( xSchedulerRunning != pdFALSE )
{
/* The current task has just been suspended. */
- configASSERT( uxSchedulerSuspended == 0 );
+ configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );
portYIELD_WITHIN_API();
}
else
@@ -1845,7 +1859,7 @@
prvAddTaskToReadyList( pxTCB );
/* A higher priority task may have just been resumed. */
- if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
+ if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
{
/* This yield may not cause the task just resumed to run,
* but will leave the lists in the correct state for the
@@ -1909,11 +1923,11 @@
traceTASK_RESUME_FROM_ISR( pxTCB );
/* Check the ready lists can be accessed. */
- if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+ if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
{
/* Ready lists can be accessed so move the task from the
* suspended list to the ready list directly. */
- if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
+ if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
{
xYieldRequired = pdTRUE;
@@ -2025,7 +2039,7 @@
* starts to run. */
portDISABLE_INTERRUPTS();
- #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
+ #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
{
/* Switch C-Runtime's TLS Block to point to the TLS
* block specific to the task that will run first. */
@@ -2178,7 +2192,7 @@
/* If uxSchedulerSuspended is zero then this function does not match a
* previous call to vTaskSuspendAll(). */
- configASSERT( uxSchedulerSuspended );
+ configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
/* It is possible that an ISR caused a task to be removed from an event
* list while the scheduler was suspended. If this was the case then the
@@ -2189,7 +2203,7 @@
{
--uxSchedulerSuspended;
- if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+ if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
{
if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
{
@@ -2203,9 +2217,9 @@
listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
prvAddTaskToReadyList( pxTCB );
- /* If the moved task has a priority higher than or equal to
- * the current task then a yield must be performed. */
- if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
+ /* If the moved task has a priority higher than the current
+ * task then a yield must be performed. */
+ if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
{
xYieldPending = pdTRUE;
}
@@ -2484,6 +2498,53 @@
#endif /* INCLUDE_xTaskGetHandle */
/*-----------------------------------------------------------*/
+#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+ BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask,
+ StackType_t ** ppuxStackBuffer,
+ StaticTask_t ** ppxTaskBuffer )
+ {
+ BaseType_t xReturn;
+ TCB_t * pxTCB;
+
+ configASSERT( ppuxStackBuffer != NULL );
+ configASSERT( ppxTaskBuffer != NULL );
+
+ pxTCB = prvGetTCBFromHandle( xTask );
+
+ #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 )
+ {
+ if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB )
+ {
+ *ppuxStackBuffer = pxTCB->pxStack;
+ *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
+ xReturn = pdTRUE;
+ }
+ else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
+ {
+ *ppuxStackBuffer = pxTCB->pxStack;
+ *ppxTaskBuffer = NULL;
+ xReturn = pdTRUE;
+ }
+ else
+ {
+ xReturn = pdFALSE;
+ }
+ }
+ #else /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */
+ {
+ *ppuxStackBuffer = pxTCB->pxStack;
+ *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
+ xReturn = pdTRUE;
+ }
+ #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */
+
+ return xReturn;
+ }
+
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
#if ( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
@@ -2533,7 +2594,7 @@
#ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
#else
- *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
+ *pulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();
#endif
}
}
@@ -2590,7 +2651,7 @@
/* Arrange for xTickCount to reach xNextTaskUnblockTime in
* xTaskIncrementTick() when the scheduler resumes. This ensures
* that any delayed tasks are resumed at the correct time. */
- configASSERT( uxSchedulerSuspended );
+ configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
configASSERT( xTicksToJump != ( TickType_t ) 0 );
/* Prevent the tick interrupt modifying xPendedTicks simultaneously. */
@@ -2619,7 +2680,7 @@
/* Must not be called with the scheduler suspended as the implementation
* relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
- configASSERT( uxSchedulerSuspended == 0 );
+ configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );
/* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
* the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
@@ -2728,7 +2789,7 @@
* tasks to be unblocked. */
traceTASK_INCREMENT_TICK( xTickCount );
- if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+ if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
{
/* Minor optimisation. The tick count cannot change in this
* block. */
@@ -3008,7 +3069,7 @@
void vTaskSwitchContext( void )
{
- if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE )
+ if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
{
/* The scheduler is currently suspended - do not allow a context
* switch. */
@@ -3024,7 +3085,7 @@
#ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
#else
- ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
+ ulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();
#endif
/* Add the amount of time the task has been running to the
@@ -3069,7 +3130,7 @@
}
#endif
- #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
+ #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
{
/* Switch C-Runtime's TLS Block to point to the TLS
* Block specific to this task. */
@@ -3113,7 +3174,7 @@
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
* the event groups implementation. */
- configASSERT( uxSchedulerSuspended != 0 );
+ configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
/* Store the item value in the event list item. It is safe to access the
* event list item here as interrupts won't access the event list item of a
@@ -3188,7 +3249,7 @@
configASSERT( pxUnblockedTCB );
listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) );
- if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+ if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
{
listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
prvAddTaskToReadyList( pxUnblockedTCB );
@@ -3241,7 +3302,7 @@
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
* the event flags implementation. */
- configASSERT( uxSchedulerSuspended != pdFALSE );
+ configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
/* Store the new item value in the event list. */
listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
@@ -3424,6 +3485,7 @@
* void prvIdleTask( void *pvParameters );
*
*/
+
static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
/* Stop warnings. */
@@ -3477,13 +3539,7 @@
#if ( configUSE_IDLE_HOOK == 1 )
{
- extern void vApplicationIdleHook( void );
-
- /* Call the user defined function from within the idle task. This
- * allows the application designer to add background functionality
- * without the overhead of a separate task.
- * NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
- * CALL A FUNCTION THAT MIGHT BLOCK. */
+ /* Call the user defined function from within the idle task. */
vApplicationIdleHook();
}
#endif /* configUSE_IDLE_HOOK */
@@ -3787,6 +3843,18 @@
}
}
#endif /* INCLUDE_vTaskSuspend */
+
+ /* Tasks can be in pending ready list and other state list at the
+ * same time. These tasks are in ready state no matter what state
+ * list the task is in. */
+ taskENTER_CRITICAL();
+ {
+ if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) != pdFALSE )
+ {
+ pxTaskStatus->eCurrentState = eReady;
+ }
+ }
+ taskEXIT_CRITICAL();
}
}
else
@@ -3950,7 +4018,7 @@
* want to allocate and clean RAM statically. */
portCLEAN_UP_TCB( pxTCB );
- #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
+ #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
{
/* Free up the memory allocated for the task's TLS Block. */
configDEINIT_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
@@ -4046,7 +4114,7 @@
}
else
{
- if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+ if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
{
xReturn = taskSCHEDULER_RUNNING;
}
@@ -5066,7 +5134,7 @@
/* The task should not have been on an event list. */
configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
- if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+ if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
{
listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
prvAddTaskToReadyList( pxTCB );
@@ -5157,7 +5225,7 @@
/* The task should not have been on an event list. */
configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
- if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+ if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
{
listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
prvAddTaskToReadyList( pxTCB );
@@ -5238,6 +5306,8 @@
TCB_t * pxTCB;
uint32_t ulReturn;
+ configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );
+
/* If null is passed in here then it is the calling task that is having
* its notification state cleared. */
pxTCB = prvGetTCBFromHandle( xTask );
@@ -5257,23 +5327,23 @@
#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
-#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
- configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void )
+ configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask )
{
- return xIdleTaskHandle->ulRunTimeCounter;
+ return xTask->ulRunTimeCounter;
}
#endif
/*-----------------------------------------------------------*/
-#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
- configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void )
+ configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask )
{
configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn;
- ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE();
+ ulTotalTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();
/* For percentage calculations. */
ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100;
@@ -5281,7 +5351,7 @@
/* Avoid divide by zero errors. */
if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 )
{
- ulReturn = xIdleTaskHandle->ulRunTimeCounter / ulTotalTime;
+ ulReturn = xTask->ulRunTimeCounter / ulTotalTime;
}
else
{
@@ -5291,7 +5361,27 @@
return ulReturn;
}
-#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void )
+ {
+ return ulTaskGetRunTimeCounter( xIdleTaskHandle );
+ }
+
+#endif
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void )
+ {
+ return ulTaskGetRunTimePercent( xIdleTaskHandle );
+ }
+
+#endif
/*-----------------------------------------------------------*/
static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
@@ -5405,6 +5495,21 @@
}
#endif /* INCLUDE_vTaskSuspend */
}
+/*-----------------------------------------------------------*/
+
+#if ( portUSING_MPU_WRAPPERS == 1 )
+
+ xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask )
+ {
+ TCB_t * pxTCB;
+
+ pxTCB = prvGetTCBFromHandle( xTask );
+
+ return &( pxTCB->xMPUSettings );
+ }
+
+#endif /* portUSING_MPU_WRAPPERS */
+/*-----------------------------------------------------------*/
/* Code below here allows additional code to be inserted into this source file,
* especially where access to file scope functions and data is needed (for example
diff --git a/Source/timers.c b/Source/timers.c
index 800a2b8..d5012ad 100644
--- a/Source/timers.c
+++ b/Source/timers.c
@@ -1,5 +1,5 @@
/*
- * FreeRTOS Kernel V10.5.1
+ * FreeRTOS Kernel V10.6.2
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
@@ -74,15 +74,15 @@
/* The definition of the timers themselves. */
typedef struct tmrTimerControl /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
- const char * pcTimerName; /*<< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
- ListItem_t xTimerListItem; /*<< Standard linked list item as used by all kernel features for event management. */
- TickType_t xTimerPeriodInTicks; /*<< How quickly and often the timer expires. */
- void * pvTimerID; /*<< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */
- TimerCallbackFunction_t pxCallbackFunction; /*<< The function that will be called when the timer expires. */
+ const char * pcTimerName; /**< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+ ListItem_t xTimerListItem; /**< Standard linked list item as used by all kernel features for event management. */
+ TickType_t xTimerPeriodInTicks; /**< How quickly and often the timer expires. */
+ void * pvTimerID; /**< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */
+ TimerCallbackFunction_t pxCallbackFunction; /**< The function that will be called when the timer expires. */
#if ( configUSE_TRACE_FACILITY == 1 )
- UBaseType_t uxTimerNumber; /*<< An ID assigned by trace tools such as FreeRTOS+Trace */
+ UBaseType_t uxTimerNumber; /**< An ID assigned by trace tools such as FreeRTOS+Trace */
#endif
- uint8_t ucStatus; /*<< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */
+ uint8_t ucStatus; /**< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */
} xTIMER;
/* The old xTIMER name is maintained above then typedefed to the new Timer_t
@@ -96,8 +96,8 @@
* and xCallbackParametersType respectively. */
typedef struct tmrTimerParameters
{
- TickType_t xMessageValue; /*<< An optional value used by a subset of commands, for example, when changing the period of a timer. */
- Timer_t * pxTimer; /*<< The timer to which the command will be applied. */
+ TickType_t xMessageValue; /**< An optional value used by a subset of commands, for example, when changing the period of a timer. */
+ Timer_t * pxTimer; /**< The timer to which the command will be applied. */
} TimerParameter_t;
@@ -112,7 +112,7 @@
* that is used to determine which message type is valid. */
typedef struct tmrTimerQueueMessage
{
- BaseType_t xMessageID; /*<< The command being sent to the timer service task. */
+ BaseType_t xMessageID; /**< The command being sent to the timer service task. */
union
{
TimerParameter_t xTimerParameters;
@@ -510,6 +510,30 @@
}
/*-----------------------------------------------------------*/
+ #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+ BaseType_t xTimerGetStaticBuffer( TimerHandle_t xTimer,
+ StaticTimer_t ** ppxTimerBuffer )
+ {
+ BaseType_t xReturn;
+ Timer_t * pxTimer = xTimer;
+
+ configASSERT( ppxTimerBuffer != NULL );
+
+ if( ( pxTimer->ucStatus & tmrSTATUS_IS_STATICALLY_ALLOCATED ) != 0 )
+ {
+ *ppxTimerBuffer = ( StaticTimer_t * ) pxTimer;
+ xReturn = pdTRUE;
+ }
+ else
+ {
+ xReturn = pdFALSE;
+ }
+
+ return xReturn;
+ }
+ #endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
const char * pcTimerGetName( TimerHandle_t xTimer ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
{
Timer_t * pxTimer = xTimer;
@@ -575,8 +599,6 @@
#if ( configUSE_DAEMON_TASK_STARTUP_HOOK == 1 )
{
- extern void vApplicationDaemonTaskStartupHook( void );
-
/* Allow the application writer to execute some code in the context of
* this task at the point the task starts executing. This is useful if the
* application includes initialisation code that would benefit from