Memory Protection Unit (MPU) Enhancements (#705)
Memory Protection Unit (MPU) Enhancements
This commit introduces a new MPU wrapper that places additional
restrictions on unprivileged tasks. The following is the list of changes
introduced with the new MPU wrapper:
1. Opaque and indirectly verifiable integers for kernel object handles:
All the kernel object handles (for example, queue handles) are now
opaque integers. Previously object handles were raw pointers.
2. Saving the task context in Task Control Block (TCB): When a task is
swapped out by the scheduler, the task's context is now saved in its
TCB. Previously the task's context was saved on its stack.
3. Execute system calls on a separate privileged only stack: FreeRTOS
system calls, which execute with elevated privilege, now use a
separate privileged only stack. Previously system calls used the
calling task's stack. The application writer can control the size of
the system call stack using new configSYSTEM_CALL_STACK_SIZE config
macro.
4. Memory bounds checks: FreeRTOS system calls which accept a pointer
and de-reference it, now verify that the calling task has the required
permissions to access the memory location referenced by the pointer.
5. System call restrictions: The following system calls are no longer
available to unprivileged tasks:
- vQueueDelete
- xQueueCreateMutex
- xQueueCreateMutexStatic
- xQueueCreateCountingSemaphore
- xQueueCreateCountingSemaphoreStatic
- xQueueGenericCreate
- xQueueGenericCreateStatic
- xQueueCreateSet
- xQueueRemoveFromSet
- xQueueGenericReset
- xTaskCreate
- xTaskCreateStatic
- vTaskDelete
- vTaskPrioritySet
- vTaskSuspendAll
- xTaskResumeAll
- xTaskGetHandle
- xTaskCallApplicationTaskHook
- vTaskList
- vTaskGetRunTimeStats
- xTaskCatchUpTicks
- xEventGroupCreate
- xEventGroupCreateStatic
- vEventGroupDelete
- xStreamBufferGenericCreate
- xStreamBufferGenericCreateStatic
- vStreamBufferDelete
- xStreamBufferReset
Also, an unprivileged task can no longer use vTaskSuspend to suspend
any task other than itself.
We thank the following people for their inputs in these enhancements:
- David Reiss of Meta Platforms, Inc.
- Lan Luo, Xinhui Shao, Yumeng Wei, Zixia Liu, Huaiyu Yan and Zhen Ling
of School of Computer Science and Engineering, Southeast University,
China.
- Xinwen Fu of Department of Computer Science, University of
Massachusetts Lowell, USA.
- Yueqi Chen, Zicheng Wang, Minghao Lin of University of Colorado
Boulder, USA.

diff --git a/.github/lexicon.txt b/.github/lexicon.txt
index 1a7d485..ec6577e 100644
--- a/.github/lexicon.txt
+++ b/.github/lexicon.txt
@@ -2468,6 +2468,7 @@
uxpriority
uxprioritytouse
uxqueue
+uxqueuegetqueueitemsize
uxqueuelength
uxqueuemessageswaiting
uxqueuespacesavailable
diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h
index b7d5054..831ed03 100644
--- a/include/FreeRTOS.h
+++ b/include/FreeRTOS.h
@@ -81,6 +81,11 @@
#endif
#endif
+/* Set configUSE_MPU_WRAPPERS_V1 to 1 to use MPU wrappers v1. */
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
/* Basic FreeRTOS definitions. */
#include "projdefs.h"
diff --git a/include/mpu_prototypes.h b/include/mpu_prototypes.h
index 08fa051..633efd4 100644
--- a/include/mpu_prototypes.h
+++ b/include/mpu_prototypes.h
@@ -39,20 +39,6 @@
#define MPU_PROTOTYPES_H
/* MPU versions of task.h API functions. */
-BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode,
- const char * const pcName,
- const uint16_t usStackDepth,
- void * const pvParameters,
- UBaseType_t uxPriority,
- TaskHandle_t * const pxCreatedTask ) FREERTOS_SYSTEM_CALL;
-TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode,
- const char * const pcName,
- const uint32_t ulStackDepth,
- void * const pvParameters,
- UBaseType_t uxPriority,
- StackType_t * const puxStackBuffer,
- StaticTask_t * const pxTaskBuffer ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
const TickType_t xTimeIncrement ) FREERTOS_SYSTEM_CALL;
@@ -63,17 +49,11 @@
TaskStatus_t * pxTaskStatus,
BaseType_t xGetFreeStackSpace,
eTaskState eState ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskPrioritySet( TaskHandle_t xTask,
- UBaseType_t uxNewPriority ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskResume( TaskHandle_t xTaskToResume ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskStartScheduler( void ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskSuspendAll( void ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xTaskResumeAll( void ) FREERTOS_SYSTEM_CALL;
TickType_t MPU_xTaskGetTickCount( void ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) FREERTOS_SYSTEM_CALL;
char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) FREERTOS_SYSTEM_CALL;
-TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
@@ -84,16 +64,14 @@
void * pvValue ) FREERTOS_SYSTEM_CALL;
void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
BaseType_t xIndex ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask,
- void * pvParameter ) FREERTOS_SYSTEM_CALL;
TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
const UBaseType_t uxArraySize,
configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) FREERTOS_SYSTEM_CALL;
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) FREERTOS_SYSTEM_CALL;
configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskList( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskGetRunTimeStats( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
UBaseType_t uxIndexToNotify,
uint32_t ulValue,
@@ -112,14 +90,55 @@
uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
UBaseType_t uxIndexToClear,
uint32_t ulBitsToClear ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xTaskIncrementTick( void ) FREERTOS_SYSTEM_CALL;
-TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
TickType_t * const pxTicksToWait ) FREERTOS_SYSTEM_CALL;
-void MPU_vTaskMissedYield( void ) FREERTOS_SYSTEM_CALL;
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskGetSchedulerState( void ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) FREERTOS_SYSTEM_CALL;
+
+/* Privileged only wrappers for Task APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode,
+ const char * const pcName,
+ const uint16_t usStackDepth,
+ void * const pvParameters,
+ UBaseType_t uxPriority,
+ TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
+TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode,
+ const char * const pcName,
+ const uint32_t ulStackDepth,
+ void * const pvParameters,
+ UBaseType_t uxPriority,
+ StackType_t * const puxStackBuffer,
+ StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION;
+void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION;
+void MPU_vTaskPrioritySet( TaskHandle_t xTask,
+ UBaseType_t uxNewPriority ) PRIVILEGED_FUNCTION;
+TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask,
+ void * pvParameter ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
+ TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition,
+ TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION;
+void MPU_vTaskAllocateMPURegions( TaskHandle_t xTaskToModify,
+ const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xTaskGetStaticBuffers( TaskHandle_t xTask,
+ StackType_t ** ppuxStackBuffer,
+ StaticTask_t ** ppxTaskBuffer ) PRIVILEGED_FUNCTION;
+UBaseType_t MPU_uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue,
+ BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+void MPU_vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
/* MPU versions of queue.h API functions. */
BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
@@ -136,15 +155,6 @@
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
-void MPU_vQueueDelete( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
-QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL;
-QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType,
- StaticQueue_t * pxStaticQueue ) FREERTOS_SYSTEM_CALL;
-QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
- const UBaseType_t uxInitialCount ) FREERTOS_SYSTEM_CALL;
-QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
- const UBaseType_t uxInitialCount,
- StaticQueue_t * pxStaticQueue ) FREERTOS_SYSTEM_CALL;
TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
@@ -153,65 +163,97 @@
const char * pcName ) FREERTOS_SYSTEM_CALL;
void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
-QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength,
- const UBaseType_t uxItemSize,
- const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL;
-QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
- const UBaseType_t uxItemSize,
- uint8_t * pucQueueStorage,
- StaticQueue_t * pxStaticQueue,
- const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL;
-QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
- QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL;
QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue,
- BaseType_t xNewQueue ) FREERTOS_SYSTEM_CALL;
void MPU_vQueueSetQueueNumber( QueueHandle_t xQueue,
UBaseType_t uxQueueNumber ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxQueueGetQueueNumber( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
uint8_t MPU_ucQueueGetQueueType( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
+/* Privileged only wrappers for Queue APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+void MPU_vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) PRIVILEGED_FUNCTION;
+QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType,
+ StaticQueue_t * pxStaticQueue ) PRIVILEGED_FUNCTION;
+QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
+ const UBaseType_t uxInitialCount ) PRIVILEGED_FUNCTION;
+QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
+ const UBaseType_t uxInitialCount,
+ StaticQueue_t * pxStaticQueue ) PRIVILEGED_FUNCTION;
+QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength,
+ const UBaseType_t uxItemSize,
+ const uint8_t ucQueueType ) PRIVILEGED_FUNCTION;
+QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
+ const UBaseType_t uxItemSize,
+ uint8_t * pucQueueStorage,
+ StaticQueue_t * pxStaticQueue,
+ const uint8_t ucQueueType ) PRIVILEGED_FUNCTION;
+QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue,
+ BaseType_t xNewQueue ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
+ uint8_t ** ppucQueueStorage,
+ StaticQueue_t ** ppxStaticQueue ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueGenericSendFromISR( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueGiveFromISR( QueueHandle_t xQueue,
+ BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueuePeekFromISR( QueueHandle_t xQueue,
+ void * const pvBuffer ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueReceiveFromISR( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+UBaseType_t MPU_uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+TaskHandle_t MPU_xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION;
+QueueSetMemberHandle_t MPU_xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
+
/* MPU versions of timers.h API functions. */
-TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName,
- const TickType_t xTimerPeriodInTicks,
- const UBaseType_t uxAutoReload,
- void * const pvTimerID,
- TimerCallbackFunction_t pxCallbackFunction ) FREERTOS_SYSTEM_CALL;
-TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName,
- const TickType_t xTimerPeriodInTicks,
- const UBaseType_t uxAutoReload,
- void * const pvTimerID,
- TimerCallbackFunction_t pxCallbackFunction,
- StaticTimer_t * pxTimerBuffer ) FREERTOS_SYSTEM_CALL;
void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
void * pvNewID ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xTimerPendFunctionCall( PendedFunction_t xFunctionToPend,
- void * pvParameter1,
- uint32_t ulParameter2,
- TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
-const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
-void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
- const UBaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL;
-UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
-TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
-TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xTimerCreateTimerTask( void ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
const BaseType_t xCommandID,
const TickType_t xOptionalValue,
BaseType_t * const pxHigherPriorityTaskWoken,
const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const UBaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL;
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+/* Privileged only wrappers for Timer APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName,
+ const TickType_t xTimerPeriodInTicks,
+ const UBaseType_t uxAutoReload,
+ void * const pvTimerID,
+ TimerCallbackFunction_t pxCallbackFunction ) PRIVILEGED_FUNCTION;
+TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName,
+ const TickType_t xTimerPeriodInTicks,
+ const UBaseType_t uxAutoReload,
+ void * const pvTimerID,
+ TimerCallbackFunction_t pxCallbackFunction,
+ StaticTimer_t * pxTimerBuffer ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xTimerGetStaticBuffer( TimerHandle_t xTimer,
+ StaticTimer_t ** ppxTimerBuffer ) PRIVILEGED_FUNCTION;
/* MPU versions of event_group.h API functions. */
-EventGroupHandle_t MPU_xEventGroupCreate( void ) FREERTOS_SYSTEM_CALL;
-EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) FREERTOS_SYSTEM_CALL;
EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
const EventBits_t uxBitsToWaitFor,
const BaseType_t xClearOnExit,
@@ -225,8 +267,26 @@
const EventBits_t uxBitsToSet,
const EventBits_t uxBitsToWaitFor,
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
-void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) FREERTOS_SYSTEM_CALL;
-UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) FREERTOS_SYSTEM_CALL;
+#if ( configUSE_TRACE_FACILITY == 1 )
+ UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) FREERTOS_SYSTEM_CALL;
+ void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) FREERTOS_SYSTEM_CALL;
+#endif /* ( configUSE_TRACE_FACILITY == 1 )*/
+
+/* Privileged only wrappers for Event Group APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+EventGroupHandle_t MPU_xEventGroupCreate( void ) PRIVILEGED_FUNCTION;
+EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) PRIVILEGED_FUNCTION;
+void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup,
+ StaticEventGroup_t ** ppxEventGroupBuffer ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+EventBits_t MPU_xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION;
/* MPU versions of message/stream_buffer.h API functions. */
size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
@@ -237,28 +297,45 @@
void * pvRxData,
size_t xBufferLengthBytes,
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
-size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
-void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
-BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL;
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+
+/* Privileged only wrappers for Stream Buffer APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes,
size_t xTriggerLevelBytes,
BaseType_t xIsMessageBuffer,
StreamBufferCallbackFunction_t pxSendCompletedCallback,
- StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) FREERTOS_SYSTEM_CALL;
+ StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION;
StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes,
size_t xTriggerLevelBytes,
BaseType_t xIsMessageBuffer,
uint8_t * const pucStreamBufferStorageArea,
StaticStreamBuffer_t * const pxStaticStreamBuffer,
StreamBufferCallbackFunction_t pxSendCompletedCallback,
- StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) FREERTOS_SYSTEM_CALL;
-
-
+ StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION;
+void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffers,
+ uint8_t ** ppucStreamBufferStorageArea,
+ StaticStreamBuffer_t ** ppxStaticStreamBuffer ) PRIVILEGED_FUNCTION;
+size_t MPU_xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+size_t MPU_xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer,
+ BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+BaseType_t MPU_xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer,
+ BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
#endif /* MPU_PROTOTYPES_H */
diff --git a/include/mpu_wrappers.h b/include/mpu_wrappers.h
index fb8aedf..020efc3 100644
--- a/include/mpu_wrappers.h
+++ b/include/mpu_wrappers.h
@@ -47,114 +47,184 @@
*/
/* Map standard task.h API functions to the MPU equivalents. */
- #define xTaskCreate MPU_xTaskCreate
- #define xTaskCreateStatic MPU_xTaskCreateStatic
- #define vTaskDelete MPU_vTaskDelete
- #define vTaskDelay MPU_vTaskDelay
- #define xTaskDelayUntil MPU_xTaskDelayUntil
- #define xTaskAbortDelay MPU_xTaskAbortDelay
- #define uxTaskPriorityGet MPU_uxTaskPriorityGet
- #define eTaskGetState MPU_eTaskGetState
- #define vTaskGetInfo MPU_vTaskGetInfo
- #define vTaskPrioritySet MPU_vTaskPrioritySet
- #define vTaskSuspend MPU_vTaskSuspend
- #define vTaskResume MPU_vTaskResume
- #define vTaskSuspendAll MPU_vTaskSuspendAll
- #define xTaskResumeAll MPU_xTaskResumeAll
- #define xTaskGetTickCount MPU_xTaskGetTickCount
- #define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks
- #define pcTaskGetName MPU_pcTaskGetName
- #define xTaskGetHandle MPU_xTaskGetHandle
- #define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark
- #define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2
- #define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag
- #define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag
- #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer
- #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer
- #define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook
- #define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle
- #define uxTaskGetSystemState MPU_uxTaskGetSystemState
- #define vTaskList MPU_vTaskList
- #define vTaskGetRunTimeStats MPU_vTaskGetRunTimeStats
- #define ulTaskGetIdleRunTimeCounter MPU_ulTaskGetIdleRunTimeCounter
- #define ulTaskGetIdleRunTimePercent MPU_ulTaskGetIdleRunTimePercent
- #define xTaskGenericNotify MPU_xTaskGenericNotify
- #define xTaskGenericNotifyWait MPU_xTaskGenericNotifyWait
- #define ulTaskGenericNotifyTake MPU_ulTaskGenericNotifyTake
- #define xTaskGenericNotifyStateClear MPU_xTaskGenericNotifyStateClear
- #define ulTaskGenericNotifyValueClear MPU_ulTaskGenericNotifyValueClear
- #define xTaskCatchUpTicks MPU_xTaskCatchUpTicks
+ #define vTaskDelay MPU_vTaskDelay
+ #define xTaskDelayUntil MPU_xTaskDelayUntil
+ #define xTaskAbortDelay MPU_xTaskAbortDelay
+ #define uxTaskPriorityGet MPU_uxTaskPriorityGet
+ #define eTaskGetState MPU_eTaskGetState
+ #define vTaskGetInfo MPU_vTaskGetInfo
+ #define vTaskSuspend MPU_vTaskSuspend
+ #define vTaskResume MPU_vTaskResume
+ #define xTaskGetTickCount MPU_xTaskGetTickCount
+ #define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks
+ #define pcTaskGetName MPU_pcTaskGetName
+ #define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark
+ #define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2
+ #define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag
+ #define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag
+ #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer
+ #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer
+ #define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle
+ #define uxTaskGetSystemState MPU_uxTaskGetSystemState
+ #define ulTaskGetIdleRunTimeCounter MPU_ulTaskGetIdleRunTimeCounter
+ #define ulTaskGetIdleRunTimePercent MPU_ulTaskGetIdleRunTimePercent
+ #define xTaskGenericNotify MPU_xTaskGenericNotify
+ #define xTaskGenericNotifyWait MPU_xTaskGenericNotifyWait
+ #define ulTaskGenericNotifyTake MPU_ulTaskGenericNotifyTake
+ #define xTaskGenericNotifyStateClear MPU_xTaskGenericNotifyStateClear
+ #define ulTaskGenericNotifyValueClear MPU_ulTaskGenericNotifyValueClear
+ #define vTaskSetTimeOutState MPU_vTaskSetTimeOutState
+ #define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut
+ #define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle
+ #define xTaskGetSchedulerState MPU_xTaskGetSchedulerState
- #define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle
- #define vTaskSetTimeOutState MPU_vTaskSetTimeOutState
- #define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut
- #define xTaskGetSchedulerState MPU_xTaskGetSchedulerState
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define ulTaskGetRunTimeCounter MPU_ulTaskGetRunTimeCounter
+ #define ulTaskGetRunTimePercent MPU_ulTaskGetRunTimePercent
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+/* Privileged only wrappers for Task APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+ #define xTaskCreate MPU_xTaskCreate
+ #define xTaskCreateStatic MPU_xTaskCreateStatic
+ #define vTaskDelete MPU_vTaskDelete
+ #define vTaskPrioritySet MPU_vTaskPrioritySet
+ #define xTaskGetHandle MPU_xTaskGetHandle
+ #define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define xTaskCreateRestricted MPU_xTaskCreateRestricted
+ #define xTaskCreateRestrictedStatic MPU_xTaskCreateRestrictedStatic
+ #define vTaskAllocateMPURegions MPU_vTaskAllocateMPURegions
+ #define xTaskGetStaticBuffers MPU_xTaskGetStaticBuffers
+ #define uxTaskPriorityGetFromISR MPU_uxTaskPriorityGetFromISR
+ #define xTaskResumeFromISR MPU_xTaskResumeFromISR
+ #define xTaskGetApplicationTaskTagFromISR MPU_xTaskGetApplicationTaskTagFromISR
+ #define xTaskGenericNotifyFromISR MPU_xTaskGenericNotifyFromISR
+ #define vTaskGenericNotifyGiveFromISR MPU_vTaskGenericNotifyGiveFromISR
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/* Map standard queue.h API functions to the MPU equivalents. */
- #define xQueueGenericSend MPU_xQueueGenericSend
- #define xQueueReceive MPU_xQueueReceive
- #define xQueuePeek MPU_xQueuePeek
- #define xQueueSemaphoreTake MPU_xQueueSemaphoreTake
- #define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting
- #define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable
+ #define xQueueGenericSend MPU_xQueueGenericSend
+ #define xQueueReceive MPU_xQueueReceive
+ #define xQueuePeek MPU_xQueuePeek
+ #define xQueueSemaphoreTake MPU_xQueueSemaphoreTake
+ #define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting
+ #define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable
+ #define xQueueGetMutexHolder MPU_xQueueGetMutexHolder
+ #define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive
+ #define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive
+ #define xQueueAddToSet MPU_xQueueAddToSet
+ #define xQueueSelectFromSet MPU_xQueueSelectFromSet
+
+ #if ( configQUEUE_REGISTRY_SIZE > 0 )
+ #define vQueueAddToRegistry MPU_vQueueAddToRegistry
+ #define vQueueUnregisterQueue MPU_vQueueUnregisterQueue
+ #define pcQueueGetName MPU_pcQueueGetName
+ #endif /* #if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+
+/* Privileged only wrappers for Queue APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
#define vQueueDelete MPU_vQueueDelete
#define xQueueCreateMutex MPU_xQueueCreateMutex
#define xQueueCreateMutexStatic MPU_xQueueCreateMutexStatic
#define xQueueCreateCountingSemaphore MPU_xQueueCreateCountingSemaphore
#define xQueueCreateCountingSemaphoreStatic MPU_xQueueCreateCountingSemaphoreStatic
- #define xQueueGetMutexHolder MPU_xQueueGetMutexHolder
- #define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive
- #define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive
#define xQueueGenericCreate MPU_xQueueGenericCreate
#define xQueueGenericCreateStatic MPU_xQueueGenericCreateStatic
- #define xQueueCreateSet MPU_xQueueCreateSet
- #define xQueueAddToSet MPU_xQueueAddToSet
- #define xQueueRemoveFromSet MPU_xQueueRemoveFromSet
- #define xQueueSelectFromSet MPU_xQueueSelectFromSet
#define xQueueGenericReset MPU_xQueueGenericReset
+ #define xQueueCreateSet MPU_xQueueCreateSet
+ #define xQueueRemoveFromSet MPU_xQueueRemoveFromSet
- #if ( configQUEUE_REGISTRY_SIZE > 0 )
- #define vQueueAddToRegistry MPU_vQueueAddToRegistry
- #define vQueueUnregisterQueue MPU_vQueueUnregisterQueue
- #define pcQueueGetName MPU_pcQueueGetName
- #endif
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define xQueueGenericGetStaticBuffers MPU_xQueueGenericGetStaticBuffers
+ #define xQueueGenericSendFromISR MPU_xQueueGenericSendFromISR
+ #define xQueueGiveFromISR MPU_xQueueGiveFromISR
+ #define xQueuePeekFromISR MPU_xQueuePeekFromISR
+ #define xQueueReceiveFromISR MPU_xQueueReceiveFromISR
+ #define xQueueIsQueueEmptyFromISR MPU_xQueueIsQueueEmptyFromISR
+ #define xQueueIsQueueFullFromISR MPU_xQueueIsQueueFullFromISR
+ #define uxQueueMessagesWaitingFromISR MPU_uxQueueMessagesWaitingFromISR
+ #define xQueueGetMutexHolderFromISR MPU_xQueueGetMutexHolderFromISR
+ #define xQueueSelectFromSetFromISR MPU_xQueueSelectFromSetFromISR
+ #endif /* if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/* Map standard timer.h API functions to the MPU equivalents. */
- #define pvTimerGetTimerID MPU_pvTimerGetTimerID
- #define vTimerSetTimerID MPU_vTimerSetTimerID
- #define xTimerIsTimerActive MPU_xTimerIsTimerActive
- #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle
- #define pcTimerGetName MPU_pcTimerGetName
- #define vTimerSetReloadMode MPU_vTimerSetReloadMode
- #define uxTimerGetReloadMode MPU_uxTimerGetReloadMode
- #define xTimerGetPeriod MPU_xTimerGetPeriod
- #define xTimerGetExpiryTime MPU_xTimerGetExpiryTime
- #define xTimerGenericCommand MPU_xTimerGenericCommand
+ #define pvTimerGetTimerID MPU_pvTimerGetTimerID
+ #define vTimerSetTimerID MPU_vTimerSetTimerID
+ #define xTimerIsTimerActive MPU_xTimerIsTimerActive
+ #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle
+ #define xTimerGenericCommand MPU_xTimerGenericCommand
+ #define pcTimerGetName MPU_pcTimerGetName
+ #define vTimerSetReloadMode MPU_vTimerSetReloadMode
+ #define uxTimerGetReloadMode MPU_uxTimerGetReloadMode
+ #define xTimerGetPeriod MPU_xTimerGetPeriod
+ #define xTimerGetExpiryTime MPU_xTimerGetExpiryTime
+
+/* Privileged only wrappers for Timer APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define xTimerGetReloadMode MPU_xTimerGetReloadMode
+ #define xTimerCreate MPU_xTimerCreate
+ #define xTimerCreateStatic MPU_xTimerCreateStatic
+ #define xTimerGetStaticBuffer MPU_xTimerGetStaticBuffer
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/* Map standard event_group.h API functions to the MPU equivalents. */
- #define xEventGroupCreate MPU_xEventGroupCreate
- #define xEventGroupCreateStatic MPU_xEventGroupCreateStatic
- #define xEventGroupWaitBits MPU_xEventGroupWaitBits
- #define xEventGroupClearBits MPU_xEventGroupClearBits
- #define xEventGroupSetBits MPU_xEventGroupSetBits
- #define xEventGroupSync MPU_xEventGroupSync
- #define vEventGroupDelete MPU_vEventGroupDelete
+ #define xEventGroupWaitBits MPU_xEventGroupWaitBits
+ #define xEventGroupClearBits MPU_xEventGroupClearBits
+ #define xEventGroupSetBits MPU_xEventGroupSetBits
+ #define xEventGroupSync MPU_xEventGroupSync
+
+ #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ #define uxEventGroupGetNumber MPU_uxEventGroupGetNumber
+ #define vEventGroupSetNumber MPU_vEventGroupSetNumber
+ #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+
+/* Privileged only wrappers for Event Group APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+ #define xEventGroupCreate MPU_xEventGroupCreate
+ #define xEventGroupCreateStatic MPU_xEventGroupCreateStatic
+ #define vEventGroupDelete MPU_vEventGroupDelete
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define xEventGroupGetStaticBuffer MPU_xEventGroupGetStaticBuffer
+ #define xEventGroupClearBitsFromISR MPU_xEventGroupClearBitsFromISR
+ #define xEventGroupSetBitsFromISR MPU_xEventGroupSetBitsFromISR
+ #define xEventGroupGetBitsFromISR MPU_xEventGroupGetBitsFromISR
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/* Map standard message/stream_buffer.h API functions to the MPU
* equivalents. */
#define xStreamBufferSend MPU_xStreamBufferSend
#define xStreamBufferReceive MPU_xStreamBufferReceive
- #define xStreamBufferNextMessageLengthBytes MPU_xStreamBufferNextMessageLengthBytes
- #define vStreamBufferDelete MPU_vStreamBufferDelete
#define xStreamBufferIsFull MPU_xStreamBufferIsFull
#define xStreamBufferIsEmpty MPU_xStreamBufferIsEmpty
- #define xStreamBufferReset MPU_xStreamBufferReset
#define xStreamBufferSpacesAvailable MPU_xStreamBufferSpacesAvailable
#define xStreamBufferBytesAvailable MPU_xStreamBufferBytesAvailable
#define xStreamBufferSetTriggerLevel MPU_xStreamBufferSetTriggerLevel
- #define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate
- #define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic
+ #define xStreamBufferNextMessageLengthBytes MPU_xStreamBufferNextMessageLengthBytes
+/* Privileged only wrappers for Stream Buffer APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+
+ #define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate
+ #define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic
+ #define vStreamBufferDelete MPU_vStreamBufferDelete
+ #define xStreamBufferReset MPU_xStreamBufferReset
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ #define xStreamBufferGetStaticBuffers MPU_xStreamBufferGetStaticBuffers
+ #define xStreamBufferSendFromISR MPU_xStreamBufferSendFromISR
+ #define xStreamBufferReceiveFromISR MPU_xStreamBufferReceiveFromISR
+ #define xStreamBufferSendCompletedFromISR MPU_xStreamBufferSendCompletedFromISR
+ #define xStreamBufferReceiveCompletedFromISR MPU_xStreamBufferReceiveCompletedFromISR
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/* Remove the privileged function macro, but keep the PRIVILEGED_DATA
* macro so applications can place data in privileged access sections
diff --git a/include/portable.h b/include/portable.h
index 52d5434..5734eb7 100644
--- a/include/portable.h
+++ b/include/portable.h
@@ -110,13 +110,15 @@
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION;
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) PRIVILEGED_FUNCTION;
#else
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION;
- #endif
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) PRIVILEGED_FUNCTION;
+ #endif /* if ( portHAS_STACK_OVERFLOW_CHECKING == 1 ) */
#else /* if ( portUSING_MPU_WRAPPERS == 1 ) */
#if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
@@ -229,6 +231,22 @@
uint32_t ulStackDepth ) PRIVILEGED_FUNCTION;
#endif
+/**
+ * @brief Checks if the calling task is authorized to access the given buffer.
+ *
+ * @param pvBuffer The buffer which the calling task wants to access.
+ * @param ulBufferLength The length of the pvBuffer.
+ * @param ulAccessRequested The permissions that the calling task wants.
+ *
+ * @return pdTRUE if the calling task is authorized to access the buffer,
+ * pdFALSE otherwise.
+ */
+#if ( portUSING_MPU_WRAPPERS == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) PRIVILEGED_FUNCTION;
+#endif
+
/* *INDENT-OFF* */
#ifdef __cplusplus
}
diff --git a/include/queue.h b/include/queue.h
index e5092ac..1c1b982 100644
--- a/include/queue.h
+++ b/include/queue.h
@@ -1752,7 +1752,7 @@
UBaseType_t uxQueueNumber ) PRIVILEGED_FUNCTION;
UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
-
+UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
/* *INDENT-OFF* */
#ifdef __cplusplus
diff --git a/include/task.h b/include/task.h
index 702f74d..af17ced 100644
--- a/include/task.h
+++ b/include/task.h
@@ -66,6 +66,11 @@
#define tskMPU_REGION_NORMAL_MEMORY ( 1UL << 3UL )
#define tskMPU_REGION_DEVICE_MEMORY ( 1UL << 4UL )
+/* MPU region permissions stored in MPU settings to
+ * authorize access requests. */
+#define tskMPU_READ_PERMISSION ( 1UL << 0UL )
+#define tskMPU_WRITE_PERMISSION ( 1UL << 1UL )
+
/* The direct to task notification feature used to have only a single notification
* per task. Now there is an array of notifications per task that is dimensioned by
* configTASK_NOTIFICATION_ARRAY_ENTRIES. For backward compatibility, any use of the
@@ -3192,6 +3197,14 @@
*/
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION;
+#if ( portUSING_MPU_WRAPPERS == 1 )
+
+/*
+ * For internal use only. Get MPU settings associated with a task.
+ */
+ xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+#endif /* portUSING_MPU_WRAPPERS */
/* *INDENT-OFF* */
#ifdef __cplusplus
diff --git a/portable/ARMv8M/copy_files.py b/portable/ARMv8M/copy_files.py
index d064969..3609c67 100644
--- a/portable/ARMv8M/copy_files.py
+++ b/portable/ARMv8M/copy_files.py
@@ -73,16 +73,22 @@
'ARM_CM33' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33')],
'ARM_CM33_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ')],
'ARM_CM35P' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'),
+ os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'mpu_wrappers_v2_asm.c'),
os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM35P', 'portmacro.h')],
'ARM_CM35P_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'),
+ os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.c'),
os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM35P', 'portmacro.h')],
'ARM_CM55' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'),
+ os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'mpu_wrappers_v2_asm.c'),
os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')],
'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'),
+ os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.c'),
os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')],
'ARM_CM85' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'),
+ os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'mpu_wrappers_v2_asm.c'),
os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM85', 'portmacro.h')],
'ARM_CM85_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'),
+ os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.c'),
os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM85', 'portmacro.h')]
},
'IAR':{
@@ -91,16 +97,22 @@
'ARM_CM33' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33')],
'ARM_CM33_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ')],
'ARM_CM35P' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'),
+ os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'mpu_wrappers_v2_asm.S'),
os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM35P', 'portmacro.h')],
'ARM_CM35P_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'),
+ os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.S'),
os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM35P', 'portmacro.h')],
'ARM_CM55' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'),
+ os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'mpu_wrappers_v2_asm.S'),
os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM55', 'portmacro.h')],
'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'),
+ os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.S'),
os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM55', 'portmacro.h')],
'ARM_CM85' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'),
+ os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'mpu_wrappers_v2_asm.S'),
os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM85', 'portmacro.h')],
'ARM_CM85_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'),
+ os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.S'),
os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM85', 'portmacro.h')]
},
}
diff --git a/portable/ARMv8M/non_secure/port.c b/portable/ARMv8M/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/ARMv8M/non_secure/port.c
+++ b/portable/ARMv8M/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/mpu_wrappers_v2_asm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..a1e5ce0
--- /dev/null
+++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2419 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+/* Naked MPU wrapper for pvTaskGetThreadLocalStoragePointer. CONTROL bit 0
+ * (nPRIV) set => unprivileged caller: SVC-enter the system call, run the
+ * Impl, SVC-exit. Privileged callers tail-branch straight to the Impl.
+ * r0/r1 are preserved around the privilege check. */
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+                                               BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+                                               BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+        " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+        " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+/* Naked MPU wrapper for uxTaskGetSystemState. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+                                      const UBaseType_t uxArraySize,
+                                      configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+                                      const UBaseType_t uxArraySize,
+                                      configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_uxTaskGetSystemStateImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_uxTaskGetSystemState_Unpriv \n"
+        " MPU_uxTaskGetSystemState_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_uxTaskGetSystemStateImpl \n"
+        " MPU_uxTaskGetSystemState_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_uxTaskGetSystemStateImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+/* Naked MPU wrapper for uxTaskGetStackHighWaterMark. CONTROL bit 0 (nPRIV)
+ * set => unprivileged caller: SVC-enter the system call, run the Impl,
+ * SVC-exit. Privileged callers tail-branch straight to the Impl. r0/r1 are
+ * preserved around the privilege check. */
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+        " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+        " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+/* Naked MPU wrapper for uxTaskGetStackHighWaterMark2. CONTROL bit 0 (nPRIV)
+ * set => unprivileged caller: SVC-enter the system call, run the Impl,
+ * SVC-exit. Privileged callers tail-branch straight to the Impl. r0/r1 are
+ * preserved around the privilege check. */
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+        " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+        " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+/* Naked MPU wrapper for xTaskGetCurrentTaskHandle. CONTROL bit 0 (nPRIV)
+ * set => unprivileged caller: SVC-enter the system call, run the Impl,
+ * SVC-exit. Privileged callers tail-branch straight to the Impl. r0/r1 are
+ * preserved around the privilege check. */
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+        " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+        " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+/* Naked MPU wrapper for xTaskGetSchedulerState. CONTROL bit 0 (nPRIV) set
+ * => unprivileged caller: SVC-enter the system call, run the Impl,
+ * SVC-exit. Privileged callers tail-branch straight to the Impl. r0/r1 are
+ * preserved around the privilege check. */
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTaskGetSchedulerStateImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+        " MPU_xTaskGetSchedulerState_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xTaskGetSchedulerStateImpl \n"
+        " MPU_xTaskGetSchedulerState_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xTaskGetSchedulerStateImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for vTaskSetTimeOutState. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_vTaskSetTimeOutStateImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+        " MPU_vTaskSetTimeOutState_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_vTaskSetTimeOutStateImpl \n"
+        " MPU_vTaskSetTimeOutState_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_vTaskSetTimeOutStateImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xTaskCheckForTimeOut. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+                                     TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+                                     TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTaskCheckForTimeOutImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+        " MPU_xTaskCheckForTimeOut_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xTaskCheckForTimeOutImpl \n"
+        " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xTaskCheckForTimeOutImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Naked MPU wrapper for xTaskGenericNotify. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check.
+ * NOTE(review): unlike most wrappers here this one enters with
+ * portSVC_SYSTEM_CALL_ENTER_1 — presumably because the API takes five
+ * arguments and the fifth is passed on the stack; confirm against the SVC
+ * handler. */
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+                                   UBaseType_t uxIndexToNotify,
+                                   uint32_t ulValue,
+                                   eNotifyAction eAction,
+                                   uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+                                   UBaseType_t uxIndexToNotify,
+                                   uint32_t ulValue,
+                                   eNotifyAction eAction,
+                                   uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTaskGenericNotifyImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xTaskGenericNotify_Unpriv \n"
+        " MPU_xTaskGenericNotify_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xTaskGenericNotifyImpl \n"
+        " MPU_xTaskGenericNotify_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xTaskGenericNotifyImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Naked MPU wrapper for xTaskGenericNotifyWait. CONTROL bit 0 (nPRIV) set
+ * => unprivileged caller: SVC-enter the system call, run the Impl,
+ * SVC-exit. Privileged callers tail-branch straight to the Impl. r0/r1 are
+ * preserved around the privilege check.
+ * NOTE(review): enters with portSVC_SYSTEM_CALL_ENTER_1 — presumably
+ * because the API takes five arguments and the fifth is passed on the
+ * stack; confirm against the SVC handler. */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+                                       uint32_t ulBitsToClearOnEntry,
+                                       uint32_t ulBitsToClearOnExit,
+                                       uint32_t * pulNotificationValue,
+                                       TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+                                       uint32_t ulBitsToClearOnEntry,
+                                       uint32_t ulBitsToClearOnExit,
+                                       uint32_t * pulNotificationValue,
+                                       TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+        " MPU_xTaskGenericNotifyWait_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xTaskGenericNotifyWaitImpl \n"
+        " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xTaskGenericNotifyWaitImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Naked MPU wrapper for ulTaskGenericNotifyTake. CONTROL bit 0 (nPRIV) set
+ * => unprivileged caller: SVC-enter the system call, run the Impl,
+ * SVC-exit. Privileged callers tail-branch straight to the Impl. r0/r1 are
+ * preserved around the privilege check. */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+                                      BaseType_t xClearCountOnExit,
+                                      TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+                                      BaseType_t xClearCountOnExit,
+                                      TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+        " MPU_ulTaskGenericNotifyTake_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_ulTaskGenericNotifyTakeImpl \n"
+        " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Naked MPU wrapper for xTaskGenericNotifyStateClear. CONTROL bit 0 (nPRIV)
+ * set => unprivileged caller: SVC-enter the system call, run the Impl,
+ * SVC-exit. Privileged callers tail-branch straight to the Impl. r0/r1 are
+ * preserved around the privilege check. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+                                             UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+                                             UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+        " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xTaskGenericNotifyStateClearImpl \n"
+        " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Naked MPU wrapper for ulTaskGenericNotifyValueClear. CONTROL bit 0
+ * (nPRIV) set => unprivileged caller: SVC-enter the system call, run the
+ * Impl, SVC-exit. Privileged callers tail-branch straight to the Impl.
+ * r0/r1 are preserved around the privilege check. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+                                            UBaseType_t uxIndexToClear,
+                                            uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+                                            UBaseType_t uxIndexToClear,
+                                            uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+        " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+        " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xQueueGenericSend. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+                                  const void * const pvItemToQueue,
+                                  TickType_t xTicksToWait,
+                                  const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+                                  const void * const pvItemToQueue,
+                                  TickType_t xTicksToWait,
+                                  const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueGenericSendImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xQueueGenericSend_Unpriv \n"
+        " MPU_xQueueGenericSend_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xQueueGenericSendImpl \n"
+        " MPU_xQueueGenericSend_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueGenericSendImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for uxQueueMessagesWaiting. CONTROL bit 0 (nPRIV) set
+ * => unprivileged caller: SVC-enter the system call, run the Impl,
+ * SVC-exit. Privileged callers tail-branch straight to the Impl. r0/r1 are
+ * preserved around the privilege check. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_uxQueueMessagesWaitingImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+        " MPU_uxQueueMessagesWaiting_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_uxQueueMessagesWaitingImpl \n"
+        " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_uxQueueMessagesWaitingImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for uxQueueSpacesAvailable. CONTROL bit 0 (nPRIV) set
+ * => unprivileged caller: SVC-enter the system call, run the Impl,
+ * SVC-exit. Privileged callers tail-branch straight to the Impl. r0/r1 are
+ * preserved around the privilege check. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_uxQueueSpacesAvailableImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+        " MPU_uxQueueSpacesAvailable_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_uxQueueSpacesAvailableImpl \n"
+        " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_uxQueueSpacesAvailableImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xQueueReceive. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+                              void * const pvBuffer,
+                              TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+                              void * const pvBuffer,
+                              TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueReceiveImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xQueueReceive_Unpriv \n"
+        " MPU_xQueueReceive_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xQueueReceiveImpl \n"
+        " MPU_xQueueReceive_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueReceiveImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xQueuePeek. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+                           void * const pvBuffer,
+                           TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+                           void * const pvBuffer,
+                           TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueuePeekImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xQueuePeek_Unpriv \n"
+        " MPU_xQueuePeek_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xQueuePeekImpl \n"
+        " MPU_xQueuePeek_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xQueuePeekImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xQueueSemaphoreTake. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+                                    TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+                                    TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueSemaphoreTakeImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+        " MPU_xQueueSemaphoreTake_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xQueueSemaphoreTakeImpl \n"
+        " MPU_xQueueSemaphoreTake_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueSemaphoreTakeImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+/* Naked MPU wrapper for xQueueGetMutexHolder. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueGetMutexHolderImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+        " MPU_xQueueGetMutexHolder_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xQueueGetMutexHolderImpl \n"
+        " MPU_xQueueGetMutexHolder_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueGetMutexHolderImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* Naked MPU wrapper for xQueueTakeMutexRecursive. CONTROL bit 0 (nPRIV) set
+ * => unprivileged caller: SVC-enter the system call, run the Impl,
+ * SVC-exit. Privileged callers tail-branch straight to the Impl. r0/r1 are
+ * preserved around the privilege check. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+                                         TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+                                         TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+        " MPU_xQueueTakeMutexRecursive_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xQueueTakeMutexRecursiveImpl \n"
+        " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* Naked MPU wrapper for xQueueGiveMutexRecursive. CONTROL bit 0 (nPRIV) set
+ * => unprivileged caller: SVC-enter the system call, run the Impl,
+ * SVC-exit. Privileged callers tail-branch straight to the Impl. r0/r1 are
+ * preserved around the privilege check. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+        " MPU_xQueueGiveMutexRecursive_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xQueueGiveMutexRecursiveImpl \n"
+        " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* Naked MPU wrapper for xQueueSelectFromSet. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueSelectFromSetImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xQueueSelectFromSet_Unpriv \n"
+        " MPU_xQueueSelectFromSet_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xQueueSelectFromSetImpl \n"
+        " MPU_xQueueSelectFromSet_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueSelectFromSetImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* Naked MPU wrapper for xQueueAddToSet. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                               QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                               QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueAddToSetImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xQueueAddToSet_Unpriv \n"
+        " MPU_xQueueAddToSet_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xQueueAddToSetImpl \n"
+        " MPU_xQueueAddToSet_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueAddToSetImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* Naked MPU wrapper for vQueueAddToRegistry. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                              const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                              const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_vQueueAddToRegistryImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_vQueueAddToRegistry_Unpriv \n"
+        " MPU_vQueueAddToRegistry_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_vQueueAddToRegistryImpl \n"
+        " MPU_vQueueAddToRegistry_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_vQueueAddToRegistryImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* Naked MPU wrapper for vQueueUnregisterQueue. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_vQueueUnregisterQueueImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+        " MPU_vQueueUnregisterQueue_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_vQueueUnregisterQueueImpl \n"
+        " MPU_vQueueUnregisterQueue_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_vQueueUnregisterQueueImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* Naked MPU wrapper for pcQueueGetName. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_pcQueueGetNameImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_pcQueueGetName_Unpriv \n"
+        " MPU_pcQueueGetName_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_pcQueueGetNameImpl \n"
+        " MPU_pcQueueGetName_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_pcQueueGetNameImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for pvTimerGetTimerID. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_pvTimerGetTimerIDImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_pvTimerGetTimerID_Unpriv \n"
+        " MPU_pvTimerGetTimerID_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_pvTimerGetTimerIDImpl \n"
+        " MPU_pvTimerGetTimerID_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_pvTimerGetTimerIDImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for vTimerSetTimerID. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                           void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                           void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_vTimerSetTimerIDImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_vTimerSetTimerID_Unpriv \n"
+        " MPU_vTimerSetTimerID_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_vTimerSetTimerIDImpl \n"
+        " MPU_vTimerSetTimerID_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_vTimerSetTimerIDImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for xTimerIsTimerActive. CONTROL bit 0 (nPRIV) set =>
+ * unprivileged caller: SVC-enter the system call, run the Impl, SVC-exit.
+ * Privileged callers tail-branch straight to the Impl. r0/r1 are preserved
+ * around the privilege check. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTimerIsTimerActiveImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xTimerIsTimerActive_Unpriv \n"
+        " MPU_xTimerIsTimerActive_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xTimerIsTimerActiveImpl \n"
+        " MPU_xTimerIsTimerActive_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xTimerIsTimerActiveImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for xTimerGetTimerDaemonTaskHandle. CONTROL bit 0
+ * (nPRIV) set => unprivileged caller: SVC-enter the system call, run the
+ * Impl, SVC-exit. Privileged callers tail-branch straight to the Impl.
+ * r0/r1 are preserved around the privilege check. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+        " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+        " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for xTimerGenericCommand. Unlike the other wrappers it
+ * first reads IPSR: a non-zero value means handler (interrupt) mode, which
+ * always executes privileged, so it branches straight to the Impl. In
+ * thread mode, CONTROL bit 0 (nPRIV) clear also selects the direct branch;
+ * otherwise the unprivileged path SVC-enters the system call, runs the
+ * Impl, and SVC-exits. r0/r1 are preserved around the checks.
+ * NOTE(review): enters with portSVC_SYSTEM_CALL_ENTER_1 — presumably
+ * because the API takes five arguments and the fifth is passed on the
+ * stack; confirm against the SVC handler. */
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                     const BaseType_t xCommandID,
+                                     const TickType_t xOptionalValue,
+                                     BaseType_t * const pxHigherPriorityTaskWoken,
+                                     const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                     const BaseType_t xCommandID,
+                                     const TickType_t xOptionalValue,
+                                     BaseType_t * const pxHigherPriorityTaskWoken,
+                                     const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTimerGenericCommandImpl \n"
+        " \n"
+        " push {r0, r1} \n"
+        " mrs r0, ipsr \n"
+        " cmp r0, #0 \n"
+        " bne MPU_xTimerGenericCommand_Priv \n"
+        " mrs r0, control \n"
+        " movs r1, #1 \n"
+        " tst r0, r1 \n"
+        " beq MPU_xTimerGenericCommand_Priv \n"
+        " MPU_xTimerGenericCommand_Unpriv: \n"
+        " pop {r0, r1} \n"
+        " svc %0 \n"
+        " bl MPU_xTimerGenericCommandImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " MPU_xTimerGenericCommand_Priv: \n"
+        " pop {r0, r1} \n"
+        " b MPU_xTimerGenericCommandImpl \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_pcTimerGetName_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_pcTimerGetNameImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_pcTimerGetNameImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_vTimerSetReloadMode_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_vTimerSetReloadModeImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_vTimerSetReloadModeImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xTimerGetReloadMode_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xTimerGetReloadModeImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xTimerGetReloadModeImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_uxTimerGetReloadModeImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_uxTimerGetReloadModeImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xTimerGetPeriod_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xTimerGetPeriodImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xTimerGetPeriodImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xTimerGetExpiryTimeImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xTimerGetExpiryTimeImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xEventGroupWaitBits_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xEventGroupWaitBitsImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xEventGroupWaitBitsImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" /* NOTE(review): ENTER_1 - presumably because the 5th argument is on the stack; confirm against the port's SVC handler. */
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xEventGroupClearBits_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xEventGroupClearBitsImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xEventGroupClearBitsImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xEventGroupSetBits_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xEventGroupSetBitsImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xEventGroupSetBitsImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xEventGroupSync_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xEventGroupSyncImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xEventGroupSyncImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_uxEventGroupGetNumberImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_uxEventGroupGetNumberImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_vEventGroupSetNumber_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_vEventGroupSetNumberImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_vEventGroupSetNumberImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xStreamBufferSend_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xStreamBufferSendImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xStreamBufferSendImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xStreamBufferReceive_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xStreamBufferReceiveImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xStreamBufferReceiveImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xStreamBufferIsFull_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xStreamBufferIsFullImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xStreamBufferIsFullImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xStreamBufferIsEmptyImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xStreamBufferIsEmptyImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xStreamBufferSpacesAvailableImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xStreamBufferBytesAvailableImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xStreamBufferBytesAvailableImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Save r0, r1 - used as scratch registers below. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " tst r0, r1 \n" /* Test the nPRIV bit (bit 0) of CONTROL. */
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" /* nPRIV set ==> the caller is unprivileged. */
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" /* Privileged caller - tail-call the implementation directly. */
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore the scratch registers. */
+ " svc %0 \n" /* System call enter - raise privilege. */
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" /* Run the implementation with elevated privilege. */
+ " svc %1 \n" /* System call exit - restore unprivileged state. */
+ " bx lr \n" /* Return to the caller. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c
index 44f159a..64a24f5 100644
--- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c
+++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c
@@ -44,6 +44,109 @@
#error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
#endif
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */
+ " movs r3, #5 \n" /* r3 = 5. */
+ " str r3, [r1] \n" /* Program RNR = 5. */
+ " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */
+ " movs r3, #6 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 6. */
+ " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */
+ " movs r3, #7 \n" /* r3 = 7. */
+ " str r3, [r1] \n" /* Program RNR = 7. */
+ " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " subs r2, #20 \n" /* r2 = r2 - 20. */
+ " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */
+ " subs r2, #20 \n" /* r2 = r2 - 20. Rewind r2 past the registers just read. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " mov lr, r6 \n"
+ " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " subs r2, #32 \n" /* r2 = r2 - 32. */
+ " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */
+ " stmia r3!, {r4-r7} \n" /* Copy half of the hardware saved context on the task stack. */
+ " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */
+ " stmia r3!, {r4-r7} \n" /* Copy rest half of the hardware saved context on the task stack. */
+ " subs r2, #48 \n" /* r2 = r2 - 48. */
+ " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " subs r2, #32 \n" /* r2 = r2 - 32. */
+ " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */
+ " subs r2, #16 \n" /* r2 = r2 - 16. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xSecureContextConst2: .word xSecureContext \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -54,83 +157,24 @@
" ldr r3, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r5, #1 \n"/* r5 = 1. */
- " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- " movs r5, #4 \n"/* r5 = 4. */
- " str r5, [r2] \n"/* Program RNR = 4. */
- " ldmia r3!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */
- " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */
- " stmia r4!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */
- " movs r5, #5 \n"/* r5 = 5. */
- " str r5, [r2] \n"/* Program RNR = 5. */
- " ldmia r3!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */
- " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */
- " stmia r4!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */
- " movs r5, #6 \n"/* r5 = 6. */
- " str r5, [r2] \n"/* Program RNR = 6. */
- " ldmia r3!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. */
- " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */
- " stmia r4!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */
- " movs r5, #7 \n"/* r5 = 7. */
- " str r5, [r2] \n"/* Program RNR = 7. */
- " ldmia r3!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */
- " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */
- " stmia r4!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r5, #1 \n"/* r5 = 1. */
- " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- " ldr r5, xSecureContextConst2 \n"
- " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " msr control, r3 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " bx r4 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
- " ldr r4, xSecureContextConst2 \n"
- " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
+ " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ " ldr r4, xSecureContextConst2 \n"
+ " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
+ " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
+ " movs r1, #2 \n"/* r1 = 2. */
+ " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n"/* Discard everything up to r0. */
+ " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " bx r3 \n"/* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -237,6 +281,167 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/
+ " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " save_s_context: \n"
+ " push {r0-r2, lr} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r3} \n" /* LR is now in r3. */
+ " mov lr, r3 \n" /* Restore LR. */
+ " \n"
+ " save_ns_context: \n"
+ " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */
+ " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " save_general_regs: \n"
+ " mrs r3, psp \n"
+ " stmia r2!, {r4-r7} \n" /* Store r4-r7. */
+ " mov r4, r8 \n" /* r4 = r8. */
+ " mov r5, r9 \n" /* r5 = r9. */
+ " mov r6, r10 \n" /* r6 = r10. */
+ " mov r7, r11 \n" /* r7 = r11. */
+ " stmia r2!, {r4-r7} \n" /* Store r8-r11. */
+ " ldmia r3!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */
+ " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */
+        "    ldmia r3!, {r4-r7}                     \n" /* Copy the remaining half of the hardware saved context into r4-r7. */
+ " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psp \n" /* r3 = PSP. */
+ " mrs r4, psplim \n" /* r4 = PSPLIM. */
+ " mrs r5, control \n" /* r5 = CONTROL. */
+ " mov r6, lr \n" /* r6 = LR. */
+ " stmia r2!, {r0, r3-r6} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " cpsid i \n"
+ " bl vTaskSwitchContext \n"
+ " cpsie i \n"
+ " \n"
+ " program_mpu: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */
+ " movs r3, #5 \n" /* r3 = 5. */
+ " str r3, [r1] \n" /* Program RNR = 5. */
+ " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */
+ " movs r3, #6 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 6. */
+ " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */
+        "    movs r3, #7                            \n" /* r3 = 7. */
+ " str r3, [r1] \n" /* Program RNR = 7. */
+ " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " subs r2, #20 \n"
+ " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */
+ " subs r2, #20 \n"
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " mov lr, r6 \n"
+ " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* No secure context to restore. */
+ " \n"
+ " restore_s_context: \n"
+ " push {r1-r3, lr} \n"
+ " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r1-r4} \n" /* LR is now in r4. */
+ " mov lr, r4 \n"
+ " \n"
+ " restore_ns_context: \n"
+ " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */
+ " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " restore_general_regs: \n"
+ " subs r2, #32 \n"
+ " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */
+        "    stmia r3!, {r4-r7}                     \n" /* Copy half of the hardware saved context onto the task stack. */
+ " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */
+        "    stmia r3!, {r4-r7}                     \n" /* Copy the remaining half of the hardware saved context onto the task stack. */
+ " subs r2, #48 \n"
+ " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " subs r2, #32 \n"
+ " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */
+ " subs r2, #16 \n"
+ " \n"
+ " restore_context_done: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xSecureContextConst: .word xSecureContext \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -260,52 +465,26 @@
" bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #16 \n"/* r2 = r2 + 16. */
- " stmia r2!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */
- " mov r4, r8 \n"/* r4 = r8. */
- " mov r5, r9 \n"/* r5 = r9. */
- " mov r6, r10 \n"/* r6 = r10. */
- " mov r7, r11 \n"/* r7 = r11. */
- " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " subs r2, r2, #48 \n"/* r2 = r2 - 48. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
- " mov r4, r8 \n"/* r4 = r8. */
- " mov r5, r9 \n"/* r5 = r9. */
- " mov r6, r10 \n"/* r6 = r10. */
- " mov r7, r11 \n"/* r7 = r11. */
- " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
+ " mov r4, r8 \n"/* r4 = r8. */
+ " mov r5, r9 \n"/* r5 = r9. */
+ " mov r6, r10 \n"/* r6 = r10. */
+ " mov r7, r11 \n"/* r7 = r11. */
+ " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
" \n"
" select_next_task: \n"
" cpsid i \n"
@@ -316,85 +495,22 @@
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " movs r5, #1 \n"/* r5 = 1. */
- " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
- " str r4, [r3] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r3] \n"/* Program MAIR0. */
- " ldr r4, xRNRConst \n"/* r4 = 0xe000ed98 [Location of RNR]. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " movs r5, #4 \n"/* r5 = 4. */
- " str r5, [r4] \n"/* Program RNR = 4. */
- " ldmia r1!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */
- " movs r5, #5 \n"/* r5 = 5. */
- " str r5, [r4] \n"/* Program RNR = 5. */
- " ldmia r1!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */
- " movs r5, #6 \n"/* r5 = 6. */
- " str r5, [r4] \n"/* Program RNR = 6. */
- " ldmia r1!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */
- " movs r5, #7 \n"/* r5 = 7. */
- " str r5, [r4] \n"/* Program RNR = 7. */
- " ldmia r1!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */
- " \n"
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " movs r5, #1 \n"/* r5 = 1. */
- " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
- " str r4, [r3] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r3 \n"/* Restore the CONTROL register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #else /* configENABLE_MPU */
- " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #endif /* configENABLE_MPU */
+ " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
+ " mov lr, r4 \n"/* LR = r4. */
+ " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r3] \n"/* Restore the task's xSecureContext. */
+ " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
+ " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
+ " push {r2, r4} \n"
+ " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r2, r4} \n"
+ " mov lr, r4 \n"/* LR = r4. */
+ " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " msr psp, r2 \n"/* Remember the new top of stack for the task. */
+ " bx lr \n"
" \n"
" restore_ns_context: \n"
" adds r2, r2, #16 \n"/* Move to the high registers. */
@@ -411,16 +527,62 @@
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xSecureContextConst: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "movs r0, #4 \n"
+ "mov r1, lr \n"
+ "tst r0, r1 \n"
+ "beq stack_on_msp \n"
+ "stack_on_psp: \n"
+ " mrs r0, psp \n"
+ " b route_svc \n"
+ "stack_on_msp: \n"
+ " mrs r0, msp \n"
+ " b route_svc \n"
+ " \n"
+ "route_svc: \n"
+ " ldr r2, [r0, #24] \n"
+ " subs r2, #2 \n"
+ " ldrb r3, [r2, #0] \n"
+ " cmp r3, %0 \n"
+ " beq system_call_enter \n"
+ " cmp r3, %1 \n"
+ " beq system_call_enter_1 \n"
+ " cmp r3, %2 \n"
+ " beq system_call_exit \n"
+ " b vPortSVCHandler_C \n"
+ " \n"
+ "system_call_enter: \n"
+ " b vSystemCallEnter \n"
+ "system_call_enter_1: \n"
+ " b vSystemCallEnter_1 \n"
+ "system_call_exit: \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "r3", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -443,6 +605,8 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..a1e5ce0
--- /dev/null
+++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2419 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for uxTaskGetNumberOfTasks. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_uxTaskGetNumberOfTasksImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for pcTaskGetName. Naked dispatcher: tests CONTROL bit 0
+ * (nPRIV); privileged -> tail-call MPU_pcTaskGetNameImpl, unprivileged ->
+ * SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call (r0/r1
+ * preserved across the check). */
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for ulTaskGetRunTimeCounter. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_ulTaskGetRunTimeCounterImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for ulTaskGetRunTimePercent. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_ulTaskGetRunTimePercentImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for ulTaskGetIdleRunTimePercent. Naked dispatcher: tests
+ * CONTROL bit 0 (nPRIV); privileged -> tail-call
+ * MPU_ulTaskGetIdleRunTimePercentImpl, unprivileged -> SVC
+ * portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call (r0/r1 preserved). */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for ulTaskGetIdleRunTimeCounter. Naked dispatcher: tests
+ * CONTROL bit 0 (nPRIV); privileged -> tail-call
+ * MPU_ulTaskGetIdleRunTimeCounterImpl, unprivileged -> SVC
+ * portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call (r0/r1 preserved). */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for vTaskSetApplicationTaskTag. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_vTaskSetApplicationTaskTagImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xTaskGetApplicationTaskTag. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_xTaskGetApplicationTaskTagImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for vTaskSetThreadLocalStoragePointer. Naked dispatcher: tests
+ * CONTROL bit 0 (nPRIV); privileged -> tail-call
+ * MPU_vTaskSetThreadLocalStoragePointerImpl, unprivileged -> SVC
+ * portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call (r0/r1 preserved). */
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for pvTaskGetThreadLocalStoragePointer. Naked dispatcher: tests
+ * CONTROL bit 0 (nPRIV); privileged -> tail-call
+ * MPU_pvTaskGetThreadLocalStoragePointerImpl, unprivileged -> SVC
+ * portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call (r0/r1 preserved). */
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for uxTaskGetSystemState. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_uxTaskGetSystemStateImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for uxTaskGetStackHighWaterMark. Naked dispatcher: tests
+ * CONTROL bit 0 (nPRIV); privileged -> tail-call
+ * MPU_uxTaskGetStackHighWaterMarkImpl, unprivileged -> SVC
+ * portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call (r0/r1 preserved). */
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for uxTaskGetStackHighWaterMark2. Naked dispatcher: tests
+ * CONTROL bit 0 (nPRIV); privileged -> tail-call
+ * MPU_uxTaskGetStackHighWaterMark2Impl, unprivileged -> SVC
+ * portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call (r0/r1 preserved). */
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xTaskGetCurrentTaskHandle. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_xTaskGetCurrentTaskHandleImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xTaskGetSchedulerState. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_xTaskGetSchedulerStateImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for vTaskSetTimeOutState. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_vTaskSetTimeOutStateImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xTaskCheckForTimeOut. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_xTaskCheckForTimeOutImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xTaskGenericNotify. Naked dispatcher: tests CONTROL bit 0
+ * (nPRIV); privileged -> tail-call MPU_xTaskGenericNotifyImpl, unprivileged
+ * -> SVC portSVC_SYSTEM_CALL_ENTER_1 / portSVC_SYSTEM_CALL_EXIT around the
+ * Impl call. NOTE(review): the ENTER_1 variant is used here (this API has
+ * five parameters, so the fifth is passed on the stack) — presumably the SVC
+ * handler copies the stacked argument; confirm against the SVC handler. */
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xTaskGenericNotifyWait. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_xTaskGenericNotifyWaitImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER_1 / portSVC_SYSTEM_CALL_EXIT
+ * around the Impl call. NOTE(review): the ENTER_1 variant is used here (five
+ * parameters, fifth passed on the stack) — presumably the SVC handler copies
+ * the stacked argument; confirm against the SVC handler. */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for ulTaskGenericNotifyTake. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_ulTaskGenericNotifyTakeImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xTaskGenericNotifyStateClear. Naked dispatcher: tests
+ * CONTROL bit 0 (nPRIV); privileged -> tail-call
+ * MPU_xTaskGenericNotifyStateClearImpl, unprivileged -> SVC
+ * portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call (r0/r1 preserved). */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for ulTaskGenericNotifyValueClear. Naked dispatcher: tests
+ * CONTROL bit 0 (nPRIV); privileged -> tail-call
+ * MPU_ulTaskGenericNotifyValueClearImpl, unprivileged -> SVC
+ * portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call (r0/r1 preserved). */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xQueueGenericSend. Naked dispatcher: tests CONTROL bit 0
+ * (nPRIV); privileged -> tail-call MPU_xQueueGenericSendImpl, unprivileged ->
+ * SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call (r0/r1
+ * preserved across the check). */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for uxQueueMessagesWaiting. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_uxQueueMessagesWaitingImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for uxQueueSpacesAvailable. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_uxQueueSpacesAvailableImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xQueueReceive. Naked dispatcher: tests CONTROL bit 0
+ * (nPRIV); privileged -> tail-call MPU_xQueueReceiveImpl, unprivileged ->
+ * SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call (r0/r1
+ * preserved across the check). */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xQueuePeek. Naked dispatcher: tests CONTROL bit 0 (nPRIV);
+ * privileged -> tail-call MPU_xQueuePeekImpl, unprivileged -> SVC
+ * portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call (r0/r1 preserved
+ * across the check). */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xQueueSemaphoreTake. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_xQueueSemaphoreTakeImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xQueueGetMutexHolder. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_xQueueGetMutexHolderImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xQueueTakeMutexRecursive. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_xQueueTakeMutexRecursiveImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xQueueGiveMutexRecursive. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_xQueueGiveMutexRecursiveImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper for xQueueSelectFromSet. Naked dispatcher: tests CONTROL
+ * bit 0 (nPRIV); privileged -> tail-call MPU_xQueueSelectFromSetImpl,
+ * unprivileged -> SVC portSVC_SYSTEM_CALL_ENTER / _EXIT around the Impl call
+ * (r0/r1 preserved across the check). */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSelectFromSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xQueueAddToSet_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xQueueAddToSetImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xQueueAddToSetImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_vQueueAddToRegistry_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_vQueueAddToRegistryImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_vQueueAddToRegistryImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_vQueueUnregisterQueueImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_vQueueUnregisterQueueImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_pcQueueGetName_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_pcQueueGetNameImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_pcQueueGetNameImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_pvTimerGetTimerID_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_pvTimerGetTimerIDImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_pvTimerGetTimerIDImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_vTimerSetTimerID_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_vTimerSetTimerIDImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_vTimerSetTimerIDImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xTimerIsTimerActive_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xTimerIsTimerActiveImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xTimerIsTimerActiveImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper; unlike the others it also checks IPSR so ISR callers take the direct path. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, ipsr \n" /* r0 = IPSR; non-zero when executing inside an exception handler. */
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n" /* In an ISR: call directly, do not raise an SVC. */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " beq MPU_xTimerGenericCommand_Priv \n" /* nPRIV clear => already privileged. */
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). ENTER_1 variant - presumably because the 5th argument is passed on the stack; confirm against the SVC handler. */
+ " bl MPU_xTimerGenericCommandImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xTimerGenericCommandImpl \n" /* Privileged/ISR: tail-call the implementation. */
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_pcTimerGetName_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_pcTimerGetNameImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_pcTimerGetNameImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_vTimerSetReloadMode_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_vTimerSetReloadModeImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_vTimerSetReloadModeImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xTimerGetReloadMode_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xTimerGetReloadModeImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xTimerGetReloadModeImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_uxTimerGetReloadModeImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_uxTimerGetReloadModeImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xTimerGetPeriod_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xTimerGetPeriodImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xTimerGetPeriodImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xTimerGetExpiryTimeImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xTimerGetExpiryTimeImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xEventGroupWaitBits_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xEventGroupWaitBitsImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). ENTER_1 variant - presumably because the 5th argument is passed on the stack; confirm against the SVC handler. */
+ " bl MPU_xEventGroupWaitBitsImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xEventGroupClearBits_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xEventGroupClearBitsImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xEventGroupClearBitsImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xEventGroupSetBits_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xEventGroupSetBitsImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xEventGroupSetBitsImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xEventGroupSync_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xEventGroupSyncImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xEventGroupSyncImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_uxEventGroupGetNumberImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_uxEventGroupGetNumberImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_vEventGroupSetNumber_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_vEventGroupSetNumberImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_vEventGroupSetNumberImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xStreamBufferSend_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xStreamBufferSendImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xStreamBufferSendImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xStreamBufferReceive_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xStreamBufferReceiveImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xStreamBufferReceiveImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xStreamBufferIsFull_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xStreamBufferIsFullImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xStreamBufferIsEmptyImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xStreamBufferSpacesAvailableImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xStreamBufferBytesAvailableImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ /* Naked wrapper: branch to the Impl directly when privileged, via SVC system call when unprivileged. */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve caller's r0, r1 (used as scratch below). */
+ " mrs r0, control \n" /* r0 = CONTROL. */
+ " movs r1, #1 \n" /* r1 = CONTROL.nPRIV mask. */
+ " tst r0, r1 \n" /* Test CONTROL.nPRIV. */
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" /* nPRIV set => unprivileged caller. */
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" /* Privileged: tail-call the implementation. */
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller's registers. */
+ " svc %0 \n" /* Enter system call (raise privilege). */
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n" /* Exit system call (drop privilege). */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c
index 7fb7b5a..b11b6e9 100644
--- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c
+++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c
@@ -44,6 +44,106 @@
#error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
#endif
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */
+ " movs r3, #5 \n" /* r3 = 5. */
+ " str r3, [r1] \n" /* Program RNR = 5. */
+ " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */
+ " movs r3, #6 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 6. */
+ " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */
+ " movs r3, #7 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 7. */
+ " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " subs r1, #16 \n"
+ " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ " subs r1, #16 \n"
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " mov lr, r5 \n"
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " subs r1, #32 \n"
+ " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */
+ " stmia r2!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */
+ " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */
+ " stmia r2!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */
+ " subs r1, #48 \n"
+ " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " subs r1, #32 \n"
+ " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */
+ " subs r1, #16 \n"
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -54,78 +154,21 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r4, #1 \n"/* r4 = 1. */
- " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r2] \n"/* Program RNR = 4. */
- " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */
- " movs r4, #5 \n"/* r4 = 5. */
- " str r4, [r2] \n"/* Program RNR = 5. */
- " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */
- " movs r4, #6 \n"/* r4 = 6. */
- " str r4, [r2] \n"/* Program RNR = 6. */
- " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */
- " movs r4, #7 \n"/* r4 = 7. */
- " str r4, [r2] \n"/* Program RNR = 7. */
- " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r4, #1 \n"/* r4 = 1. */
- " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " msr control, r2 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " bx r2 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
+ " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
+ " movs r1, #2 \n"/* r1 = 2. */
+ " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n"/* Discard everything up to r0. */
+ " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " bx r2 \n"/* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -232,6 +275,136 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " \n"
+ " save_general_regs: \n"
+ " stmia r1!, {r4-r7} \n" /* Store r4-r7. */
+ " mov r4, r8 \n" /* r4 = r8. */
+ " mov r5, r9 \n" /* r5 = r9. */
+ " mov r6, r10 \n" /* r6 = r10. */
+ " mov r7, r11 \n" /* r7 = r11. */
+ " stmia r1!, {r4-r7} \n" /* Store r8-r11. */
+ " ldmia r2!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */
+ " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */
+ " ldmia r2!, {r4-r7} \n" /* Copy rest half of the hardware saved context into r4-r7. */
+ " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " mrs r3, psplim \n" /* r3 = PSPLIM. */
+ " mrs r4, control \n" /* r4 = CONTROL. */
+ " mov r5, lr \n" /* r5 = LR. */
+ " stmia r1!, {r2-r5} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " cpsid i \n"
+ " bl vTaskSwitchContext \n"
+ " cpsie i \n"
+ " \n"
+ " program_mpu: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */
+ " movs r3, #5 \n" /* r3 = 5. */
+ " str r3, [r1] \n" /* Program RNR = 5. */
+ " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */
+ " movs r3, #6 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 6. */
+ " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */
+ " movs r3, #7 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 7. */
+ " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " subs r1, #16 \n"
+ " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ " subs r1, #16 \n"
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " mov lr, r5 \n"
+ " \n"
+ " restore_general_regs: \n"
+ " subs r1, #32 \n"
+ " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */
+ " stmia r2!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */
+ " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */
+ " stmia r2!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */
+ " subs r1, #48 \n"
+ " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " subs r1, #32 \n"
+ " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */
+ " subs r1, #16 \n"
+ " \n"
+ " restore_context_done: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -241,30 +414,16 @@
" mrs r0, psp \n"/* Read PSP in r0. */
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
- #if ( configENABLE_MPU == 1 )
- " subs r0, r0, #44 \n"/* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- " str r0, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r2, control \n"/* r2 = CONTROL. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r0!, {r1-r7} \n"/* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */
- " mov r4, r8 \n"/* r4 = r8. */
- " mov r5, r9 \n"/* r5 = r9. */
- " mov r6, r10 \n"/* r6 = r10. */
- " mov r7, r11 \n"/* r7 = r11. */
- " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
- #else /* configENABLE_MPU */
- " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */
- " str r0, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r2, psplim \n"/* r2 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
- " mov r4, r8 \n"/* r4 = r8. */
- " mov r5, r9 \n"/* r5 = r9. */
- " mov r6, r10 \n"/* r6 = r10. */
- " mov r7, r11 \n"/* r7 = r11. */
- " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
- #endif /* configENABLE_MPU */
+ " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */
+ " str r0, [r1] \n"/* Save the new top of stack in TCB. */
+ " mrs r2, psplim \n"/* r2 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
+ " mov r4, r8 \n"/* r4 = r8. */
+ " mov r5, r9 \n"/* r5 = r9. */
+ " mov r6, r10 \n"/* r6 = r10. */
+ " mov r7, r11 \n"/* r7 = r11. */
+ " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
" \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
@@ -274,88 +433,76 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r4, #1 \n"/* r4 = 1. */
- " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r2] \n"/* Program RNR = 4. */
- " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */
- " movs r4, #5 \n"/* r4 = 5. */
- " str r4, [r2] \n"/* Program RNR = 5. */
- " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */
- " movs r4, #6 \n"/* r4 = 6. */
- " str r4, [r2] \n"/* Program RNR = 6. */
- " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */
- " movs r4, #7 \n"/* r4 = 7. */
- " str r4, [r2] \n"/* Program RNR = 7. */
- " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */
- " \n"
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r4, #1 \n"/* r4 = 1. */
- " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " adds r0, r0, #28 \n"/* Move to the high registers. */
- " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
- " mov r8, r4 \n"/* r8 = r4. */
- " mov r9, r5 \n"/* r9 = r5. */
- " mov r10, r6 \n"/* r10 = r6. */
- " mov r11, r7 \n"/* r11 = r7. */
- " msr psp, r0 \n"/* Remember the new top of stack for the task. */
- " subs r0, r0, #44 \n"/* Move to the starting of the saved context. */
- " ldmia r0!, {r1-r7} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r2 \n"/* Restore the CONTROL register value for the task. */
- " bx r3 \n"
- #else /* configENABLE_MPU */
- " adds r0, r0, #24 \n"/* Move to the high registers. */
- " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
- " mov r8, r4 \n"/* r8 = r4. */
- " mov r9, r5 \n"/* r9 = r5. */
- " mov r10, r6 \n"/* r10 = r6. */
- " mov r11, r7 \n"/* r11 = r7. */
- " msr psp, r0 \n"/* Remember the new top of stack for the task. */
- " subs r0, r0, #40 \n"/* Move to the starting of the saved context. */
- " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
- " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
- " bx r3 \n"
- #endif /* configENABLE_MPU */
+ " adds r0, r0, #24 \n"/* Move to the high registers. */
+ " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
+ " mov r8, r4 \n"/* r8 = r4. */
+ " mov r9, r5 \n"/* r9 = r5. */
+ " mov r10, r6 \n"/* r10 = r6. */
+ " mov r11, r7 \n"/* r11 = r7. */
+ " msr psp, r0 \n"/* Remember the new top of stack for the task. */
+ " subs r0, r0, #40 \n"/* Move to the starting of the saved context. */
+ " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
+ " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
+ " bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "movs r0, #4 \n"
+ "mov r1, lr \n"
+ "tst r0, r1 \n"
+ "beq stack_on_msp \n"
+ "stack_on_psp: \n"
+ " mrs r0, psp \n"
+ " b route_svc \n"
+ "stack_on_msp: \n"
+ " mrs r0, msp \n"
+ " b route_svc \n"
+ " \n"
+ "route_svc: \n"
+ " ldr r2, [r0, #24] \n"
+ " subs r2, #2 \n"
+ " ldrb r3, [r2, #0] \n"
+ " cmp r3, %0 \n"
+ " beq system_call_enter \n"
+ " cmp r3, %1 \n"
+ " beq system_call_enter_1 \n"
+ " cmp r3, %2 \n"
+ " beq system_call_exit \n"
+ " b vPortSVCHandler_C \n"
+ " \n"
+ "system_call_enter: \n"
+ " b vSystemCallEnter \n"
+ "system_call_enter_1: \n"
+ " b vSystemCallEnter_1 \n"
+ "system_call_exit: \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "r3", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -378,4 +525,6 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/mpu_wrappers_v2_asm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..6e20434
--- /dev/null
+++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2349 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+/* System-call wrapper for vTaskSuspend. Tests CONTROL.nPRIV: privileged
+ * callers tail-call MPU_vTaskSuspendImpl directly; unprivileged callers
+ * enter via SVC %0, call the implementation, and exit via SVC %1. r0 is
+ * saved/restored around the CONTROL read so the handle argument survives. */
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+/* System-call wrapper for vTaskResume. Tests CONTROL.nPRIV: privileged
+ * callers tail-call MPU_vTaskResumeImpl directly; unprivileged callers
+ * enter via SVC %0, call the implementation, and exit via SVC %1. r0 is
+ * saved/restored around the CONTROL read so the handle argument survives. */
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xTaskGetTickCount. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_xTaskGetTickCountImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for uxTaskGetNumberOfTasks. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_uxTaskGetNumberOfTasksImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for pcTaskGetName. Tests CONTROL.nPRIV: privileged
+ * callers tail-call MPU_pcTaskGetNameImpl directly; unprivileged callers
+ * enter via SVC %0, call the implementation, and exit via SVC %1. r0 is
+ * saved/restored around the CONTROL read so the handle argument survives. */
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+/* System-call wrapper for ulTaskGetRunTimeCounter. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_ulTaskGetRunTimeCounterImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+/* System-call wrapper for ulTaskGetRunTimePercent. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_ulTaskGetRunTimePercentImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+/* System-call wrapper for ulTaskGetIdleRunTimePercent. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_ulTaskGetIdleRunTimePercentImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+/* System-call wrapper for ulTaskGetIdleRunTimeCounter. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_ulTaskGetIdleRunTimeCounterImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+/* System-call wrapper for vTaskSetApplicationTaskTag. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_vTaskSetApplicationTaskTagImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read so the
+ * first argument register reaches the implementation intact. */
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+/* System-call wrapper for xTaskGetApplicationTaskTag. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_xTaskGetApplicationTaskTagImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+/* System-call wrapper for vTaskSetThreadLocalStoragePointer. Tests
+ * CONTROL.nPRIV: privileged callers tail-call the Impl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read so the
+ * first argument register reaches the implementation intact. */
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+/* System-call wrapper for pvTaskGetThreadLocalStoragePointer. Tests
+ * CONTROL.nPRIV: privileged callers tail-call the Impl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+/* System-call wrapper for uxTaskGetSystemState. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_uxTaskGetSystemStateImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read so the
+ * first argument register reaches the implementation intact. */
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+/* System-call wrapper for uxTaskGetStackHighWaterMark. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_uxTaskGetStackHighWaterMarkImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+/* System-call wrapper for uxTaskGetStackHighWaterMark2. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_uxTaskGetStackHighWaterMark2Impl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+/* System-call wrapper for xTaskGetCurrentTaskHandle. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_xTaskGetCurrentTaskHandleImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+/* System-call wrapper for xTaskGetSchedulerState. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_xTaskGetSchedulerStateImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for vTaskSetTimeOutState. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_vTaskSetTimeOutStateImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read so the
+ * pointer argument reaches the implementation intact. */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xTaskCheckForTimeOut. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_xTaskCheckForTimeOutImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* System-call wrapper for xTaskGenericNotify. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_xTaskGenericNotifyImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read.
+ * NOTE(review): uses portSVC_SYSTEM_CALL_ENTER_1 rather than the plain
+ * ENTER used by the four-or-fewer argument wrappers — presumably because
+ * this call has five arguments and the fifth is passed on the task stack
+ * (AAPCS); confirm against the SVC handler. */
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* System-call wrapper for xTaskGenericNotifyWait. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_xTaskGenericNotifyWaitImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read.
+ * NOTE(review): uses portSVC_SYSTEM_CALL_ENTER_1 rather than the plain
+ * ENTER used by the four-or-fewer argument wrappers — presumably because
+ * this call has five arguments and the fifth is passed on the task stack
+ * (AAPCS); confirm against the SVC handler. */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* System-call wrapper for ulTaskGenericNotifyTake. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_ulTaskGenericNotifyTakeImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* System-call wrapper for xTaskGenericNotifyStateClear. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_xTaskGenericNotifyStateClearImpl
+ * directly; unprivileged callers enter via SVC %0, call the implementation,
+ * and exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* System-call wrapper for ulTaskGenericNotifyValueClear. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_ulTaskGenericNotifyValueClearImpl
+ * directly; unprivileged callers enter via SVC %0, call the implementation,
+ * and exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xQueueGenericSend. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_xQueueGenericSendImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read so the
+ * queue handle argument reaches the implementation intact. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for uxQueueMessagesWaiting. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_uxQueueMessagesWaitingImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for uxQueueSpacesAvailable. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_uxQueueSpacesAvailableImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xQueueReceive. Tests CONTROL.nPRIV: privileged
+ * callers tail-call MPU_xQueueReceiveImpl directly; unprivileged callers
+ * enter via SVC %0, call the implementation, and exit via SVC %1. r0 is
+ * saved/restored around the CONTROL read so the queue handle survives. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xQueuePeek. Tests CONTROL.nPRIV: privileged
+ * callers tail-call MPU_xQueuePeekImpl directly; unprivileged callers
+ * enter via SVC %0, call the implementation, and exit via SVC %1. r0 is
+ * saved/restored around the CONTROL read so the queue handle survives. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xQueueSemaphoreTake. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_xQueueSemaphoreTakeImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+/* System-call wrapper for xQueueGetMutexHolder. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_xQueueGetMutexHolderImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* System-call wrapper for xQueueTakeMutexRecursive. Tests CONTROL.nPRIV:
+ * privileged callers tail-call MPU_xQueueTakeMutexRecursiveImpl directly;
+ * unprivileged callers enter via SVC %0, call the implementation, and
+ * exit via SVC %1. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xQueueGiveMutexRecursive. */
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xQueueGiveMutexRecursiveImpl        \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xQueueGiveMutexRecursive_Unpriv         \n"
+        " MPU_xQueueGiveMutexRecursive_Priv:              \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xQueueGiveMutexRecursiveImpl          \n" /* Privileged: tail-call the implementation. */
+        " MPU_xQueueGiveMutexRecursive_Unpriv:            \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xQueueGiveMutexRecursiveImpl         \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xQueueSelectFromSet. */
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xQueueSelectFromSetImpl             \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xQueueSelectFromSet_Unpriv              \n"
+        " MPU_xQueueSelectFromSet_Priv:                   \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xQueueSelectFromSetImpl               \n" /* Privileged: tail-call the implementation. */
+        " MPU_xQueueSelectFromSet_Unpriv:                 \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xQueueSelectFromSetImpl              \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                               QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xQueueAddToSet. */
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                               QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xQueueAddToSetImpl                  \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xQueueAddToSet_Unpriv                   \n"
+        " MPU_xQueueAddToSet_Priv:                        \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xQueueAddToSetImpl                    \n" /* Privileged: tail-call the implementation. */
+        " MPU_xQueueAddToSet_Unpriv:                      \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xQueueAddToSetImpl                   \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                              const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for vQueueAddToRegistry. */
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                              const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_vQueueAddToRegistryImpl             \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_vQueueAddToRegistry_Unpriv              \n"
+        " MPU_vQueueAddToRegistry_Priv:                   \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_vQueueAddToRegistryImpl               \n" /* Privileged: tail-call the implementation. */
+        " MPU_vQueueAddToRegistry_Unpriv:                 \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_vQueueAddToRegistryImpl              \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for vQueueUnregisterQueue. */
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_vQueueUnregisterQueueImpl           \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_vQueueUnregisterQueue_Unpriv            \n"
+        " MPU_vQueueUnregisterQueue_Priv:                 \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_vQueueUnregisterQueueImpl             \n" /* Privileged: tail-call the implementation. */
+        " MPU_vQueueUnregisterQueue_Unpriv:               \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_vQueueUnregisterQueueImpl            \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for pcQueueGetName. */
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_pcQueueGetNameImpl                  \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_pcQueueGetName_Unpriv                   \n"
+        " MPU_pcQueueGetName_Priv:                        \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_pcQueueGetNameImpl                    \n" /* Privileged: tail-call the implementation. */
+        " MPU_pcQueueGetName_Unpriv:                      \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_pcQueueGetNameImpl                   \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for pvTimerGetTimerID. */
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_pvTimerGetTimerIDImpl               \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_pvTimerGetTimerID_Unpriv                \n"
+        " MPU_pvTimerGetTimerID_Priv:                     \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_pvTimerGetTimerIDImpl                 \n" /* Privileged: tail-call the implementation. */
+        " MPU_pvTimerGetTimerID_Unpriv:                   \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_pvTimerGetTimerIDImpl                \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                           void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for vTimerSetTimerID. */
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                           void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_vTimerSetTimerIDImpl                \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_vTimerSetTimerID_Unpriv                 \n"
+        " MPU_vTimerSetTimerID_Priv:                      \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_vTimerSetTimerIDImpl                  \n" /* Privileged: tail-call the implementation. */
+        " MPU_vTimerSetTimerID_Unpriv:                    \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_vTimerSetTimerIDImpl                 \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xTimerIsTimerActive. */
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xTimerIsTimerActiveImpl             \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xTimerIsTimerActive_Unpriv              \n"
+        " MPU_xTimerIsTimerActive_Priv:                   \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xTimerIsTimerActiveImpl               \n" /* Privileged: tail-call the implementation. */
+        " MPU_xTimerIsTimerActive_Unpriv:                 \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xTimerIsTimerActiveImpl              \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xTimerGetTimerDaemonTaskHandle. */
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl  \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv   \n"
+        " MPU_xTimerGetTimerDaemonTaskHandle_Priv:        \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xTimerGetTimerDaemonTaskHandleImpl    \n" /* Privileged: tail-call the implementation. */
+        " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:      \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xTimerGetTimerDaemonTaskHandleImpl   \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                     const BaseType_t xCommandID,
+                                     const TickType_t xOptionalValue,
+                                     BaseType_t * const pxHigherPriorityTaskWoken,
+                                     const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xTimerGenericCommand; also callable from ISRs. */
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                     const BaseType_t xCommandID,
+                                     const TickType_t xOptionalValue,
+                                     BaseType_t * const pxHigherPriorityTaskWoken,
+                                     const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xTimerGenericCommandImpl            \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, ipsr                                    \n" /* Non-zero IPSR => executing in an exception handler. */
+        " cmp r0, #0                                      \n"
+        " bne MPU_xTimerGenericCommand_Priv               \n" /* ISR context: take the direct (privileged) path. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV clear => privileged thread caller. */
+        " beq MPU_xTimerGenericCommand_Priv               \n"
+        " MPU_xTimerGenericCommand_Unpriv:                \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). NOTE(review): ENTER_1 variant - presumably because the 5th argument is on the stack; confirm against the SVC handler. */
+        "     bl MPU_xTimerGenericCommandImpl             \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        " MPU_xTimerGenericCommand_Priv:                  \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xTimerGenericCommandImpl              \n" /* Privileged/ISR: tail-call the implementation. */
+        "                                                 \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for pcTimerGetName. */
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_pcTimerGetNameImpl                  \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_pcTimerGetName_Unpriv                   \n"
+        " MPU_pcTimerGetName_Priv:                        \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_pcTimerGetNameImpl                    \n" /* Privileged: tail-call the implementation. */
+        " MPU_pcTimerGetName_Unpriv:                      \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_pcTimerGetNameImpl                   \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                              const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for vTimerSetReloadMode. */
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                              const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_vTimerSetReloadModeImpl             \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_vTimerSetReloadMode_Unpriv              \n"
+        " MPU_vTimerSetReloadMode_Priv:                   \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_vTimerSetReloadModeImpl               \n" /* Privileged: tail-call the implementation. */
+        " MPU_vTimerSetReloadMode_Unpriv:                 \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_vTimerSetReloadModeImpl              \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xTimerGetReloadMode. */
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xTimerGetReloadModeImpl             \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xTimerGetReloadMode_Unpriv              \n"
+        " MPU_xTimerGetReloadMode_Priv:                   \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xTimerGetReloadModeImpl               \n" /* Privileged: tail-call the implementation. */
+        " MPU_xTimerGetReloadMode_Unpriv:                 \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xTimerGetReloadModeImpl              \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for uxTimerGetReloadMode. */
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_uxTimerGetReloadModeImpl            \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_uxTimerGetReloadMode_Unpriv             \n"
+        " MPU_uxTimerGetReloadMode_Priv:                  \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_uxTimerGetReloadModeImpl              \n" /* Privileged: tail-call the implementation. */
+        " MPU_uxTimerGetReloadMode_Unpriv:                \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_uxTimerGetReloadModeImpl             \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xTimerGetPeriod. */
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xTimerGetPeriodImpl                 \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xTimerGetPeriod_Unpriv                  \n"
+        " MPU_xTimerGetPeriod_Priv:                       \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xTimerGetPeriodImpl                   \n" /* Privileged: tail-call the implementation. */
+        " MPU_xTimerGetPeriod_Unpriv:                     \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xTimerGetPeriodImpl                  \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xTimerGetExpiryTime. */
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xTimerGetExpiryTimeImpl             \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xTimerGetExpiryTime_Unpriv              \n"
+        " MPU_xTimerGetExpiryTime_Priv:                   \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xTimerGetExpiryTimeImpl               \n" /* Privileged: tail-call the implementation. */
+        " MPU_xTimerGetExpiryTime_Unpriv:                 \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xTimerGetExpiryTimeImpl              \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     const BaseType_t xClearOnExit,
+                                     const BaseType_t xWaitForAllBits,
+                                     TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xEventGroupWaitBits. */
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     const BaseType_t xClearOnExit,
+                                     const BaseType_t xWaitForAllBits,
+                                     TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xEventGroupWaitBitsImpl             \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xEventGroupWaitBits_Unpriv              \n"
+        " MPU_xEventGroupWaitBits_Priv:                   \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xEventGroupWaitBitsImpl               \n" /* Privileged: tail-call the implementation. */
+        " MPU_xEventGroupWaitBits_Unpriv:                 \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call. NOTE(review): ENTER_1 variant - presumably because the 5th argument is on the stack; confirm against the SVC handler. */
+        "     bl MPU_xEventGroupWaitBitsImpl              \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                      const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xEventGroupClearBits. */
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                      const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xEventGroupClearBitsImpl            \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xEventGroupClearBits_Unpriv             \n"
+        " MPU_xEventGroupClearBits_Priv:                  \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xEventGroupClearBitsImpl              \n" /* Privileged: tail-call the implementation. */
+        " MPU_xEventGroupClearBits_Unpriv:                \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xEventGroupClearBitsImpl             \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                    const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xEventGroupSetBits. */
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                    const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xEventGroupSetBitsImpl              \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xEventGroupSetBits_Unpriv               \n"
+        " MPU_xEventGroupSetBits_Priv:                    \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xEventGroupSetBitsImpl                \n" /* Privileged: tail-call the implementation. */
+        " MPU_xEventGroupSetBits_Unpriv:                  \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xEventGroupSetBitsImpl               \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                 const EventBits_t uxBitsToSet,
+                                 const EventBits_t uxBitsToWaitFor,
+                                 TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xEventGroupSync. */
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                 const EventBits_t uxBitsToSet,
+                                 const EventBits_t uxBitsToWaitFor,
+                                 TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xEventGroupSyncImpl                 \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xEventGroupSync_Unpriv                  \n"
+        " MPU_xEventGroupSync_Priv:                       \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xEventGroupSyncImpl                   \n" /* Privileged: tail-call the implementation. */
+        " MPU_xEventGroupSync_Unpriv:                     \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xEventGroupSyncImpl                  \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for uxEventGroupGetNumber. */
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_uxEventGroupGetNumberImpl           \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_uxEventGroupGetNumber_Unpriv            \n"
+        " MPU_uxEventGroupGetNumber_Priv:                 \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_uxEventGroupGetNumberImpl             \n" /* Privileged: tail-call the implementation. */
+        " MPU_uxEventGroupGetNumber_Unpriv:               \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_uxEventGroupGetNumberImpl            \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+                               UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for vEventGroupSetNumber. */
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+                               UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_vEventGroupSetNumberImpl            \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_vEventGroupSetNumber_Unpriv             \n"
+        " MPU_vEventGroupSetNumber_Priv:                  \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_vEventGroupSetNumberImpl              \n" /* Privileged: tail-call the implementation. */
+        " MPU_vEventGroupSetNumber_Unpriv:                \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_vEventGroupSetNumberImpl             \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                              const void * pvTxData,
+                              size_t xDataLengthBytes,
+                              TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xStreamBufferSend. */
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                              const void * pvTxData,
+                              size_t xDataLengthBytes,
+                              TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xStreamBufferSendImpl               \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xStreamBufferSend_Unpriv                \n"
+        " MPU_xStreamBufferSend_Priv:                     \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xStreamBufferSendImpl                 \n" /* Privileged: tail-call the implementation. */
+        " MPU_xStreamBufferSend_Unpriv:                   \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xStreamBufferSendImpl                \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                                 void * pvRxData,
+                                 size_t xBufferLengthBytes,
+                                 TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xStreamBufferReceive. */
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                                 void * pvRxData,
+                                 size_t xBufferLengthBytes,
+                                 TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xStreamBufferReceiveImpl            \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xStreamBufferReceive_Unpriv             \n"
+        " MPU_xStreamBufferReceive_Priv:                  \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xStreamBufferReceiveImpl              \n" /* Privileged: tail-call the implementation. */
+        " MPU_xStreamBufferReceive_Unpriv:                \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xStreamBufferReceiveImpl             \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xStreamBufferIsFull. */
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xStreamBufferIsFullImpl             \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xStreamBufferIsFull_Unpriv              \n"
+        " MPU_xStreamBufferIsFull_Priv:                   \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xStreamBufferIsFullImpl               \n" /* Privileged: tail-call the implementation. */
+        " MPU_xStreamBufferIsFull_Unpriv:                 \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xStreamBufferIsFullImpl              \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xStreamBufferIsEmpty. */
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xStreamBufferIsEmptyImpl            \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xStreamBufferIsEmpty_Unpriv             \n"
+        " MPU_xStreamBufferIsEmpty_Priv:                  \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xStreamBufferIsEmptyImpl              \n" /* Privileged: tail-call the implementation. */
+        " MPU_xStreamBufferIsEmpty_Unpriv:                \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xStreamBufferIsEmptyImpl             \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xStreamBufferSpacesAvailable. */
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xStreamBufferSpacesAvailableImpl    \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xStreamBufferSpacesAvailable_Unpriv     \n"
+        " MPU_xStreamBufferSpacesAvailable_Priv:          \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xStreamBufferSpacesAvailableImpl      \n" /* Privileged: tail-call the implementation. */
+        " MPU_xStreamBufferSpacesAvailable_Unpriv:        \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xStreamBufferSpacesAvailableImpl     \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xStreamBufferBytesAvailable. */
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xStreamBufferBytesAvailableImpl     \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xStreamBufferBytesAvailable_Unpriv      \n"
+        " MPU_xStreamBufferBytesAvailable_Priv:           \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xStreamBufferBytesAvailableImpl       \n" /* Privileged: tail-call the implementation. */
+        " MPU_xStreamBufferBytesAvailable_Unpriv:         \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xStreamBufferBytesAvailableImpl      \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+                                             size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xStreamBufferSetTriggerLevel. */
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+                                             size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xStreamBufferSetTriggerLevelImpl    \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xStreamBufferSetTriggerLevel_Unpriv     \n"
+        " MPU_xStreamBufferSetTriggerLevel_Priv:          \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xStreamBufferSetTriggerLevelImpl      \n" /* Privileged: tail-call the implementation. */
+        " MPU_xStreamBufferSetTriggerLevel_Unpriv:        \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xStreamBufferSetTriggerLevelImpl     \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; /* SVC-dispatching system call wrapper for xStreamBufferNextMessageLengthBytes. */
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                 \n"
+        " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+        "                                                 \n"
+        " push {r0}                                       \n" /* Save r0; used as scratch below. */
+        " mrs r0, control                                 \n"
+        " tst r0, #1                                      \n" /* CONTROL.nPRIV set => unprivileged caller. */
+        " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+        " MPU_xStreamBufferNextMessageLengthBytes_Priv:   \n"
+        "     pop {r0}                                    \n"
+        "     b MPU_xStreamBufferNextMessageLengthBytesImpl \n" /* Privileged: tail-call the implementation. */
+        " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+        "     pop {r0}                                    \n"
+        "     svc %0                                      \n" /* Enter system call (raise privilege, switch to system call stack). */
+        "     bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+        "     svc %1                                      \n" /* Exit system call (restore privilege and task stack). */
+        "     bx lr                                       \n"
+        "                                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c
index 9f9b2e6..f7ec7d9 100644
--- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c
+++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c
@@ -40,95 +40,120 @@
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB. */
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xSecureContextConst2: .word xSecureContext \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
- " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r3, [r2] \n"/* Read pxCurrentTCB. */
- " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r3, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r2] \n"/* Program RNR = 4. */
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #8 \n"/* r4 = 8. */
- " str r4, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #12 \n"/* r4 = 12. */
- " str r4, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- " ldr r5, xSecureContextConst2 \n"
- " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " msr control, r3 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r4 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
- " ldr r4, xSecureContextConst2 \n"
- " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
- " \n"
+ " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ " ldr r4, xSecureContextConst2 \n"
+ " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */
+ " msr psplim, r2 \n" /* Set this task's PSPLIM value. */
+ " movs r1, #2 \n" /* r1 = 2. */
+ " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n" /* Discard everything up to r0. */
+ " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx r3 \n" /* Finally, branch to EXC_RETURN. */
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -236,6 +261,160 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " save_s_context: \n"
+ " push {r0-r2, lr} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r2, lr} \n"
+ " \n"
+ " save_ns_context: \n"
+ " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */
+ " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " save_general_regs: \n"
+ " mrs r3, psp \n"
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " stmia r2!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psp \n" /* r3 = PSP. */
+ " mrs r4, psplim \n" /* r4 = PSPLIM. */
+ " mrs r5, control \n" /* r5 = CONTROL. */
+ " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB. */
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* No secure context to restore. */
+ " \n"
+ " restore_s_context: \n"
+ " push {r1-r3, lr} \n"
+ " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r1-r3, lr} \n"
+ " \n"
+ " restore_ns_context: \n"
+ " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */
+ " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " restore_context_done: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xSecureContextConst: .word xSecureContext \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -260,20 +439,11 @@
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB.*/
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
@@ -284,26 +454,14 @@
" it eq \n"
" vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #16 \n"/* r2 = r2 + 16. */
- " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " subs r2, r2, #16 \n"/* r2 = r2 - 16. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #12 \n"/* r2 = r2 + 12. */
- " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " subs r2, r2, #12 \n"/* r2 = r2 - 12. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " adds r2, r2, #12 \n"/* r2 = r2 + 12. */
+ " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " subs r2, r2, #12 \n"/* r2 = r2 - 12. */
+ " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" \n"
" select_next_task: \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
@@ -318,83 +476,22 @@
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r3] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r3] \n"/* Program MAIR0. */
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r3] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #8 \n"/* r4 = 8. */
- " str r4, [r3] \n"/* Program RNR = 8. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #12 \n"/* r4 = 12. */
- " str r4, [r3] \n"/* Program RNR = 12. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r3] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r3 \n"/* Restore the CONTROL register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #else /* configENABLE_MPU */
- " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #endif /* configENABLE_MPU */
+ " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
+ " mov lr, r4 \n"/* LR = r4. */
+ " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r3] \n"/* Restore the task's xSecureContext. */
+ " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
+ " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
+ " push {r2, r4} \n"
+ " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r2, r4} \n"
+ " mov lr, r4 \n"/* LR = r4. */
+ " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " msr psp, r2 \n"/* Remember the new top of stack for the task. */
+ " bx lr \n"
" \n"
" restore_ns_context: \n"
" ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */
@@ -409,17 +506,60 @@
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xSecureContextConst: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n" /* Bit[2] of EXC_RETURN - 0 ==> frame is on MSP, 1 ==> frame is on PSP. */
+ "ite eq \n"
+ "mrseq r0, msp \n" /* r0 = address of the exception stack frame (MSP). */
+ "mrsne r0, psp \n" /* r0 = address of the exception stack frame (PSP). */
+ " \n"
+ "ldr r1, [r0, #24] \n" /* r1 = stacked return address (PC) - offset 24 in the exception frame. */
+ "ldrb r2, [r1, #-2] \n" /* r2 = SVC number - the immediate byte of the SVC instruction at PC-2. */
+ "cmp r2, %0 \n" /* Is it a portSVC_SYSTEM_CALL_ENTER request? */
+ "beq syscall_enter \n"
+ "cmp r2, %1 \n" /* Is it a portSVC_SYSTEM_CALL_ENTER_1 request? */
+ "beq syscall_enter_1 \n"
+ "cmp r2, %2 \n" /* Is it a portSVC_SYSTEM_CALL_EXIT request? */
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n" /* Any other SVC number - handle in C. r0 (the frame pointer) is the parameter. */
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n" /* Pass EXC_RETURN as the second parameter. */
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_enter_1: \n"
+ " mov r1, lr \n" /* Pass EXC_RETURN as the second parameter. */
+ " b vSystemCallEnter_1 \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n" /* Pass EXC_RETURN as the second parameter. */
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -437,6 +577,8 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..6e20434
--- /dev/null
+++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2349 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+                                             UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xTaskGenericNotifyStateClearImpl directly; an unprivileged
+ * caller traps via SVC to run the Impl on the privileged system call stack,
+ * then traps again to exit. r0 is saved around the CONTROL read. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+                                             UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                              \n"
+        " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+        "                                              \n"
+        " push {r0}                                    \n"
+        " mrs r0, control                              \n"
+        " tst r0, #1                                   \n"
+        " bne MPU_xTaskGenericNotifyStateClear_Unpriv  \n"
+        " MPU_xTaskGenericNotifyStateClear_Priv:       \n"
+        " pop {r0}                                     \n"
+        " b MPU_xTaskGenericNotifyStateClearImpl       \n"
+        " MPU_xTaskGenericNotifyStateClear_Unpriv:     \n"
+        " pop {r0}                                     \n"
+        " svc %0                                       \n"
+        " bl MPU_xTaskGenericNotifyStateClearImpl      \n"
+        " svc %1                                       \n"
+        " bx lr                                        \n"
+        "                                              \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+                                            UBaseType_t uxIndexToClear,
+                                            uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_ulTaskGenericNotifyValueClearImpl directly; an unprivileged
+ * caller traps via SVC to run the Impl on the privileged system call stack,
+ * then traps again to exit. r0 is saved around the CONTROL read. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+                                            UBaseType_t uxIndexToClear,
+                                            uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                             \n"
+        " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+        "                                             \n"
+        " push {r0}                                   \n"
+        " mrs r0, control                             \n"
+        " tst r0, #1                                  \n"
+        " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+        " MPU_ulTaskGenericNotifyValueClear_Priv:     \n"
+        " pop {r0}                                    \n"
+        " b MPU_ulTaskGenericNotifyValueClearImpl     \n"
+        " MPU_ulTaskGenericNotifyValueClear_Unpriv:   \n"
+        " pop {r0}                                    \n"
+        " svc %0                                      \n"
+        " bl MPU_ulTaskGenericNotifyValueClearImpl    \n"
+        " svc %1                                      \n"
+        " bx lr                                       \n"
+        "                                             \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+                                  const void * const pvItemToQueue,
+                                  TickType_t xTicksToWait,
+                                  const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xQueueGenericSendImpl directly; an unprivileged caller traps
+ * via SVC to run the Impl on the privileged system call stack, then traps
+ * again to exit. r0 is saved around the CONTROL read. All four arguments fit
+ * in r0-r3, so the plain ENTER SVC number is used. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+                                  const void * const pvItemToQueue,
+                                  TickType_t xTicksToWait,
+                                  const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                   \n"
+        " .extern MPU_xQueueGenericSendImpl \n"
+        "                                   \n"
+        " push {r0}                         \n"
+        " mrs r0, control                   \n"
+        " tst r0, #1                        \n"
+        " bne MPU_xQueueGenericSend_Unpriv  \n"
+        " MPU_xQueueGenericSend_Priv:       \n"
+        " pop {r0}                          \n"
+        " b MPU_xQueueGenericSendImpl       \n"
+        " MPU_xQueueGenericSend_Unpriv:     \n"
+        " pop {r0}                          \n"
+        " svc %0                            \n"
+        " bl MPU_xQueueGenericSendImpl      \n"
+        " svc %1                            \n"
+        " bx lr                             \n"
+        "                                   \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_uxQueueMessagesWaitingImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                        \n"
+        " .extern MPU_uxQueueMessagesWaitingImpl \n"
+        "                                        \n"
+        " push {r0}                              \n"
+        " mrs r0, control                        \n"
+        " tst r0, #1                             \n"
+        " bne MPU_uxQueueMessagesWaiting_Unpriv  \n"
+        " MPU_uxQueueMessagesWaiting_Priv:       \n"
+        " pop {r0}                               \n"
+        " b MPU_uxQueueMessagesWaitingImpl       \n"
+        " MPU_uxQueueMessagesWaiting_Unpriv:     \n"
+        " pop {r0}                               \n"
+        " svc %0                                 \n"
+        " bl MPU_uxQueueMessagesWaitingImpl      \n"
+        " svc %1                                 \n"
+        " bx lr                                  \n"
+        "                                        \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_uxQueueSpacesAvailableImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                        \n"
+        " .extern MPU_uxQueueSpacesAvailableImpl \n"
+        "                                        \n"
+        " push {r0}                              \n"
+        " mrs r0, control                        \n"
+        " tst r0, #1                             \n"
+        " bne MPU_uxQueueSpacesAvailable_Unpriv  \n"
+        " MPU_uxQueueSpacesAvailable_Priv:       \n"
+        " pop {r0}                               \n"
+        " b MPU_uxQueueSpacesAvailableImpl       \n"
+        " MPU_uxQueueSpacesAvailable_Unpriv:     \n"
+        " pop {r0}                               \n"
+        " svc %0                                 \n"
+        " bl MPU_uxQueueSpacesAvailableImpl      \n"
+        " svc %1                                 \n"
+        " bx lr                                  \n"
+        "                                        \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+                              void * const pvBuffer,
+                              TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xQueueReceiveImpl directly; an unprivileged caller traps via
+ * SVC to run the Impl on the privileged system call stack, then traps again
+ * to exit. r0 is saved around the CONTROL read. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+                              void * const pvBuffer,
+                              TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified               \n"
+        " .extern MPU_xQueueReceiveImpl \n"
+        "                               \n"
+        " push {r0}                     \n"
+        " mrs r0, control               \n"
+        " tst r0, #1                    \n"
+        " bne MPU_xQueueReceive_Unpriv  \n"
+        " MPU_xQueueReceive_Priv:       \n"
+        " pop {r0}                      \n"
+        " b MPU_xQueueReceiveImpl       \n"
+        " MPU_xQueueReceive_Unpriv:     \n"
+        " pop {r0}                      \n"
+        " svc %0                        \n"
+        " bl MPU_xQueueReceiveImpl      \n"
+        " svc %1                        \n"
+        " bx lr                         \n"
+        "                               \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+                           void * const pvBuffer,
+                           TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xQueuePeekImpl directly; an unprivileged caller traps via
+ * SVC to run the Impl on the privileged system call stack, then traps again
+ * to exit. r0 is saved around the CONTROL read. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+                           void * const pvBuffer,
+                           TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified            \n"
+        " .extern MPU_xQueuePeekImpl \n"
+        "                            \n"
+        " push {r0}                  \n"
+        " mrs r0, control            \n"
+        " tst r0, #1                 \n"
+        " bne MPU_xQueuePeek_Unpriv  \n"
+        " MPU_xQueuePeek_Priv:       \n"
+        " pop {r0}                   \n"
+        " b MPU_xQueuePeekImpl       \n"
+        " MPU_xQueuePeek_Unpriv:     \n"
+        " pop {r0}                   \n"
+        " svc %0                     \n"
+        " bl MPU_xQueuePeekImpl      \n"
+        " svc %1                     \n"
+        " bx lr                      \n"
+        "                            \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+                                    TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xQueueSemaphoreTakeImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+                                    TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                     \n"
+        " .extern MPU_xQueueSemaphoreTakeImpl \n"
+        "                                     \n"
+        " push {r0}                           \n"
+        " mrs r0, control                     \n"
+        " tst r0, #1                          \n"
+        " bne MPU_xQueueSemaphoreTake_Unpriv  \n"
+        " MPU_xQueueSemaphoreTake_Priv:       \n"
+        " pop {r0}                            \n"
+        " b MPU_xQueueSemaphoreTakeImpl       \n"
+        " MPU_xQueueSemaphoreTake_Unpriv:     \n"
+        " pop {r0}                            \n"
+        " svc %0                              \n"
+        " bl MPU_xQueueSemaphoreTakeImpl      \n"
+        " svc %1                              \n"
+        " bx lr                               \n"
+        "                                     \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xQueueGetMutexHolderImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                      \n"
+        " .extern MPU_xQueueGetMutexHolderImpl \n"
+        "                                      \n"
+        " push {r0}                            \n"
+        " mrs r0, control                      \n"
+        " tst r0, #1                           \n"
+        " bne MPU_xQueueGetMutexHolder_Unpriv  \n"
+        " MPU_xQueueGetMutexHolder_Priv:       \n"
+        " pop {r0}                             \n"
+        " b MPU_xQueueGetMutexHolderImpl       \n"
+        " MPU_xQueueGetMutexHolder_Unpriv:     \n"
+        " pop {r0}                             \n"
+        " svc %0                               \n"
+        " bl MPU_xQueueGetMutexHolderImpl      \n"
+        " svc %1                               \n"
+        " bx lr                                \n"
+        "                                      \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+                                         TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xQueueTakeMutexRecursiveImpl directly; an unprivileged
+ * caller traps via SVC to run the Impl on the privileged system call stack,
+ * then traps again to exit. r0 is saved around the CONTROL read. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+                                         TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                          \n"
+        " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+        "                                          \n"
+        " push {r0}                                \n"
+        " mrs r0, control                          \n"
+        " tst r0, #1                               \n"
+        " bne MPU_xQueueTakeMutexRecursive_Unpriv  \n"
+        " MPU_xQueueTakeMutexRecursive_Priv:       \n"
+        " pop {r0}                                 \n"
+        " b MPU_xQueueTakeMutexRecursiveImpl       \n"
+        " MPU_xQueueTakeMutexRecursive_Unpriv:     \n"
+        " pop {r0}                                 \n"
+        " svc %0                                   \n"
+        " bl MPU_xQueueTakeMutexRecursiveImpl      \n"
+        " svc %1                                   \n"
+        " bx lr                                    \n"
+        "                                          \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xQueueGiveMutexRecursiveImpl directly; an unprivileged
+ * caller traps via SVC to run the Impl on the privileged system call stack,
+ * then traps again to exit. r0 is saved around the CONTROL read. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                          \n"
+        " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+        "                                          \n"
+        " push {r0}                                \n"
+        " mrs r0, control                          \n"
+        " tst r0, #1                               \n"
+        " bne MPU_xQueueGiveMutexRecursive_Unpriv  \n"
+        " MPU_xQueueGiveMutexRecursive_Priv:       \n"
+        " pop {r0}                                 \n"
+        " b MPU_xQueueGiveMutexRecursiveImpl       \n"
+        " MPU_xQueueGiveMutexRecursive_Unpriv:     \n"
+        " pop {r0}                                 \n"
+        " svc %0                                   \n"
+        " bl MPU_xQueueGiveMutexRecursiveImpl      \n"
+        " svc %1                                   \n"
+        " bx lr                                    \n"
+        "                                          \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xQueueSelectFromSetImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                     \n"
+        " .extern MPU_xQueueSelectFromSetImpl \n"
+        "                                     \n"
+        " push {r0}                           \n"
+        " mrs r0, control                     \n"
+        " tst r0, #1                          \n"
+        " bne MPU_xQueueSelectFromSet_Unpriv  \n"
+        " MPU_xQueueSelectFromSet_Priv:       \n"
+        " pop {r0}                            \n"
+        " b MPU_xQueueSelectFromSetImpl       \n"
+        " MPU_xQueueSelectFromSet_Unpriv:     \n"
+        " pop {r0}                            \n"
+        " svc %0                              \n"
+        " bl MPU_xQueueSelectFromSetImpl      \n"
+        " svc %1                              \n"
+        " bx lr                               \n"
+        "                                     \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                               QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xQueueAddToSetImpl directly; an unprivileged caller traps
+ * via SVC to run the Impl on the privileged system call stack, then traps
+ * again to exit. r0 is saved around the CONTROL read. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                               QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                \n"
+        " .extern MPU_xQueueAddToSetImpl \n"
+        "                                \n"
+        " push {r0}                      \n"
+        " mrs r0, control                \n"
+        " tst r0, #1                     \n"
+        " bne MPU_xQueueAddToSet_Unpriv  \n"
+        " MPU_xQueueAddToSet_Priv:       \n"
+        " pop {r0}                       \n"
+        " b MPU_xQueueAddToSetImpl       \n"
+        " MPU_xQueueAddToSet_Unpriv:     \n"
+        " pop {r0}                       \n"
+        " svc %0                         \n"
+        " bl MPU_xQueueAddToSetImpl      \n"
+        " svc %1                         \n"
+        " bx lr                          \n"
+        "                                \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                              const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_vQueueAddToRegistryImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                              const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                     \n"
+        " .extern MPU_vQueueAddToRegistryImpl \n"
+        "                                     \n"
+        " push {r0}                           \n"
+        " mrs r0, control                     \n"
+        " tst r0, #1                          \n"
+        " bne MPU_vQueueAddToRegistry_Unpriv  \n"
+        " MPU_vQueueAddToRegistry_Priv:       \n"
+        " pop {r0}                            \n"
+        " b MPU_vQueueAddToRegistryImpl       \n"
+        " MPU_vQueueAddToRegistry_Unpriv:     \n"
+        " pop {r0}                            \n"
+        " svc %0                              \n"
+        " bl MPU_vQueueAddToRegistryImpl      \n"
+        " svc %1                              \n"
+        " bx lr                               \n"
+        "                                     \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_vQueueUnregisterQueueImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                       \n"
+        " .extern MPU_vQueueUnregisterQueueImpl \n"
+        "                                       \n"
+        " push {r0}                             \n"
+        " mrs r0, control                       \n"
+        " tst r0, #1                            \n"
+        " bne MPU_vQueueUnregisterQueue_Unpriv  \n"
+        " MPU_vQueueUnregisterQueue_Priv:       \n"
+        " pop {r0}                              \n"
+        " b MPU_vQueueUnregisterQueueImpl       \n"
+        " MPU_vQueueUnregisterQueue_Unpriv:     \n"
+        " pop {r0}                              \n"
+        " svc %0                                \n"
+        " bl MPU_vQueueUnregisterQueueImpl      \n"
+        " svc %1                                \n"
+        " bx lr                                 \n"
+        "                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_pcQueueGetNameImpl directly; an unprivileged caller traps
+ * via SVC to run the Impl on the privileged system call stack, then traps
+ * again to exit. r0 is saved around the CONTROL read. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                \n"
+        " .extern MPU_pcQueueGetNameImpl \n"
+        "                                \n"
+        " push {r0}                      \n"
+        " mrs r0, control                \n"
+        " tst r0, #1                     \n"
+        " bne MPU_pcQueueGetName_Unpriv  \n"
+        " MPU_pcQueueGetName_Priv:       \n"
+        " pop {r0}                       \n"
+        " b MPU_pcQueueGetNameImpl       \n"
+        " MPU_pcQueueGetName_Unpriv:     \n"
+        " pop {r0}                       \n"
+        " svc %0                         \n"
+        " bl MPU_pcQueueGetNameImpl      \n"
+        " svc %1                         \n"
+        " bx lr                          \n"
+        "                                \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_pvTimerGetTimerIDImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                   \n"
+        " .extern MPU_pvTimerGetTimerIDImpl \n"
+        "                                   \n"
+        " push {r0}                         \n"
+        " mrs r0, control                   \n"
+        " tst r0, #1                        \n"
+        " bne MPU_pvTimerGetTimerID_Unpriv  \n"
+        " MPU_pvTimerGetTimerID_Priv:       \n"
+        " pop {r0}                          \n"
+        " b MPU_pvTimerGetTimerIDImpl       \n"
+        " MPU_pvTimerGetTimerID_Unpriv:     \n"
+        " pop {r0}                          \n"
+        " svc %0                            \n"
+        " bl MPU_pvTimerGetTimerIDImpl      \n"
+        " svc %1                            \n"
+        " bx lr                             \n"
+        "                                   \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                           void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_vTimerSetTimerIDImpl directly; an unprivileged caller traps
+ * via SVC to run the Impl on the privileged system call stack, then traps
+ * again to exit. r0 is saved around the CONTROL read. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                           void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                  \n"
+        " .extern MPU_vTimerSetTimerIDImpl \n"
+        "                                  \n"
+        " push {r0}                        \n"
+        " mrs r0, control                  \n"
+        " tst r0, #1                       \n"
+        " bne MPU_vTimerSetTimerID_Unpriv  \n"
+        " MPU_vTimerSetTimerID_Priv:       \n"
+        " pop {r0}                         \n"
+        " b MPU_vTimerSetTimerIDImpl       \n"
+        " MPU_vTimerSetTimerID_Unpriv:     \n"
+        " pop {r0}                         \n"
+        " svc %0                           \n"
+        " bl MPU_vTimerSetTimerIDImpl      \n"
+        " svc %1                           \n"
+        " bx lr                            \n"
+        "                                  \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xTimerIsTimerActiveImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                     \n"
+        " .extern MPU_xTimerIsTimerActiveImpl \n"
+        "                                     \n"
+        " push {r0}                           \n"
+        " mrs r0, control                     \n"
+        " tst r0, #1                          \n"
+        " bne MPU_xTimerIsTimerActive_Unpriv  \n"
+        " MPU_xTimerIsTimerActive_Priv:       \n"
+        " pop {r0}                            \n"
+        " b MPU_xTimerIsTimerActiveImpl       \n"
+        " MPU_xTimerIsTimerActive_Unpriv:     \n"
+        " pop {r0}                            \n"
+        " svc %0                              \n"
+        " bl MPU_xTimerIsTimerActiveImpl      \n"
+        " svc %1                              \n"
+        " bx lr                               \n"
+        "                                     \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xTimerGetTimerDaemonTaskHandleImpl directly; an unprivileged
+ * caller traps via SVC to run the Impl on the privileged system call stack,
+ * then traps again to exit. r0 is saved around the CONTROL read even though
+ * this call takes no arguments, keeping the stub identical to its peers. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                \n"
+        " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+        "                                                \n"
+        " push {r0}                                      \n"
+        " mrs r0, control                                \n"
+        " tst r0, #1                                     \n"
+        " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv  \n"
+        " MPU_xTimerGetTimerDaemonTaskHandle_Priv:       \n"
+        " pop {r0}                                       \n"
+        " b MPU_xTimerGetTimerDaemonTaskHandleImpl       \n"
+        " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:     \n"
+        " pop {r0}                                       \n"
+        " svc %0                                         \n"
+        " bl MPU_xTimerGetTimerDaemonTaskHandleImpl      \n"
+        " svc %1                                         \n"
+        " bx lr                                          \n"
+        "                                                \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                     const BaseType_t xCommandID,
+                                     const TickType_t xOptionalValue,
+                                     BaseType_t * const pxHigherPriorityTaskWoken,
+                                     const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper with an extra interrupt check: IPSR != 0 means
+ * handler mode, where an SVC must not be raised, so the privileged path is
+ * taken directly. Otherwise CONTROL nPRIV selects the path: bit clear
+ * (privileged) also tail-calls the Impl, while an unprivileged thread raises
+ * SVC ( portSVC_SYSTEM_CALL_ENTER_1, used because the fifth argument is
+ * passed on the stack ) to run the Impl on the privileged system call stack,
+ * then SVC ( portSVC_SYSTEM_CALL_EXIT ) to return. r0 is saved around the
+ * IPSR/CONTROL reads. */
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                     const BaseType_t xCommandID,
+                                     const TickType_t xOptionalValue,
+                                     BaseType_t * const pxHigherPriorityTaskWoken,
+                                     const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                      \n"
+        " .extern MPU_xTimerGenericCommandImpl \n"
+        "                                      \n"
+        " push {r0}                            \n"
+        " mrs r0, ipsr                         \n"
+        " cmp r0, #0                           \n"
+        " bne MPU_xTimerGenericCommand_Priv    \n"
+        " mrs r0, control                      \n"
+        " tst r0, #1                           \n"
+        " beq MPU_xTimerGenericCommand_Priv    \n"
+        " MPU_xTimerGenericCommand_Unpriv:     \n"
+        " pop {r0}                             \n"
+        " svc %0                               \n"
+        " bl MPU_xTimerGenericCommandImpl      \n"
+        " svc %1                               \n"
+        " bx lr                                \n"
+        " MPU_xTimerGenericCommand_Priv:       \n"
+        " pop {r0}                             \n"
+        " b MPU_xTimerGenericCommandImpl       \n"
+        "                                      \n"
+        "                                      \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_pcTimerGetNameImpl directly; an unprivileged caller traps
+ * via SVC to run the Impl on the privileged system call stack, then traps
+ * again to exit. r0 is saved around the CONTROL read. */
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                \n"
+        " .extern MPU_pcTimerGetNameImpl \n"
+        "                                \n"
+        " push {r0}                      \n"
+        " mrs r0, control                \n"
+        " tst r0, #1                     \n"
+        " bne MPU_pcTimerGetName_Unpriv  \n"
+        " MPU_pcTimerGetName_Priv:       \n"
+        " pop {r0}                       \n"
+        " b MPU_pcTimerGetNameImpl       \n"
+        " MPU_pcTimerGetName_Unpriv:     \n"
+        " pop {r0}                       \n"
+        " svc %0                         \n"
+        " bl MPU_pcTimerGetNameImpl      \n"
+        " svc %1                         \n"
+        " bx lr                          \n"
+        "                                \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                              const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_vTimerSetReloadModeImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                              const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                     \n"
+        " .extern MPU_vTimerSetReloadModeImpl \n"
+        "                                     \n"
+        " push {r0}                           \n"
+        " mrs r0, control                     \n"
+        " tst r0, #1                          \n"
+        " bne MPU_vTimerSetReloadMode_Unpriv  \n"
+        " MPU_vTimerSetReloadMode_Priv:       \n"
+        " pop {r0}                            \n"
+        " b MPU_vTimerSetReloadModeImpl       \n"
+        " MPU_vTimerSetReloadMode_Unpriv:     \n"
+        " pop {r0}                            \n"
+        " svc %0                              \n"
+        " bl MPU_vTimerSetReloadModeImpl      \n"
+        " svc %1                              \n"
+        " bx lr                               \n"
+        "                                     \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xTimerGetReloadModeImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                     \n"
+        " .extern MPU_xTimerGetReloadModeImpl \n"
+        "                                     \n"
+        " push {r0}                           \n"
+        " mrs r0, control                     \n"
+        " tst r0, #1                          \n"
+        " bne MPU_xTimerGetReloadMode_Unpriv  \n"
+        " MPU_xTimerGetReloadMode_Priv:       \n"
+        " pop {r0}                            \n"
+        " b MPU_xTimerGetReloadModeImpl       \n"
+        " MPU_xTimerGetReloadMode_Unpriv:     \n"
+        " pop {r0}                            \n"
+        " svc %0                              \n"
+        " bl MPU_xTimerGetReloadModeImpl      \n"
+        " svc %1                              \n"
+        " bx lr                               \n"
+        "                                     \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_uxTimerGetReloadModeImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                      \n"
+        " .extern MPU_uxTimerGetReloadModeImpl \n"
+        "                                      \n"
+        " push {r0}                            \n"
+        " mrs r0, control                      \n"
+        " tst r0, #1                           \n"
+        " bne MPU_uxTimerGetReloadMode_Unpriv  \n"
+        " MPU_uxTimerGetReloadMode_Priv:       \n"
+        " pop {r0}                             \n"
+        " b MPU_uxTimerGetReloadModeImpl       \n"
+        " MPU_uxTimerGetReloadMode_Unpriv:     \n"
+        " pop {r0}                             \n"
+        " svc %0                               \n"
+        " bl MPU_uxTimerGetReloadModeImpl      \n"
+        " svc %1                               \n"
+        " bx lr                                \n"
+        "                                      \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xTimerGetPeriodImpl directly; an unprivileged caller traps
+ * via SVC to run the Impl on the privileged system call stack, then traps
+ * again to exit. r0 is saved around the CONTROL read. */
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                 \n"
+        " .extern MPU_xTimerGetPeriodImpl \n"
+        "                                 \n"
+        " push {r0}                       \n"
+        " mrs r0, control                 \n"
+        " tst r0, #1                      \n"
+        " bne MPU_xTimerGetPeriod_Unpriv  \n"
+        " MPU_xTimerGetPeriod_Priv:       \n"
+        " pop {r0}                        \n"
+        " b MPU_xTimerGetPeriodImpl       \n"
+        " MPU_xTimerGetPeriod_Unpriv:     \n"
+        " pop {r0}                        \n"
+        " svc %0                          \n"
+        " bl MPU_xTimerGetPeriodImpl      \n"
+        " svc %1                          \n"
+        " bx lr                           \n"
+        "                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xTimerGetExpiryTimeImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                     \n"
+        " .extern MPU_xTimerGetExpiryTimeImpl \n"
+        "                                     \n"
+        " push {r0}                           \n"
+        " mrs r0, control                     \n"
+        " tst r0, #1                          \n"
+        " bne MPU_xTimerGetExpiryTime_Unpriv  \n"
+        " MPU_xTimerGetExpiryTime_Priv:       \n"
+        " pop {r0}                            \n"
+        " b MPU_xTimerGetExpiryTimeImpl       \n"
+        " MPU_xTimerGetExpiryTime_Unpriv:     \n"
+        " pop {r0}                            \n"
+        " svc %0                              \n"
+        " bl MPU_xTimerGetExpiryTimeImpl      \n"
+        " svc %1                              \n"
+        " bx lr                               \n"
+        "                                     \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     const BaseType_t xClearOnExit,
+                                     const BaseType_t xWaitForAllBits,
+                                     TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xEventGroupWaitBitsImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. Uses portSVC_SYSTEM_CALL_ENTER_1 because this call
+ * has five parameters, so the fifth is passed on the caller's stack and must
+ * be copied across by the SVC handler. r0 is saved around the CONTROL read. */
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     const BaseType_t xClearOnExit,
+                                     const BaseType_t xWaitForAllBits,
+                                     TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                     \n"
+        " .extern MPU_xEventGroupWaitBitsImpl \n"
+        "                                     \n"
+        " push {r0}                           \n"
+        " mrs r0, control                     \n"
+        " tst r0, #1                          \n"
+        " bne MPU_xEventGroupWaitBits_Unpriv  \n"
+        " MPU_xEventGroupWaitBits_Priv:       \n"
+        " pop {r0}                            \n"
+        " b MPU_xEventGroupWaitBitsImpl       \n"
+        " MPU_xEventGroupWaitBits_Unpriv:     \n"
+        " pop {r0}                            \n"
+        " svc %0                              \n"
+        " bl MPU_xEventGroupWaitBitsImpl      \n"
+        " svc %1                              \n"
+        " bx lr                               \n"
+        "                                     \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                      const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xEventGroupClearBitsImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                      const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                      \n"
+        " .extern MPU_xEventGroupClearBitsImpl \n"
+        "                                      \n"
+        " push {r0}                            \n"
+        " mrs r0, control                      \n"
+        " tst r0, #1                           \n"
+        " bne MPU_xEventGroupClearBits_Unpriv  \n"
+        " MPU_xEventGroupClearBits_Priv:       \n"
+        " pop {r0}                             \n"
+        " b MPU_xEventGroupClearBitsImpl       \n"
+        " MPU_xEventGroupClearBits_Unpriv:     \n"
+        " pop {r0}                             \n"
+        " svc %0                               \n"
+        " bl MPU_xEventGroupClearBitsImpl      \n"
+        " svc %1                               \n"
+        " bx lr                                \n"
+        "                                      \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                    const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xEventGroupSetBitsImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                    const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                    \n"
+        " .extern MPU_xEventGroupSetBitsImpl \n"
+        "                                    \n"
+        " push {r0}                          \n"
+        " mrs r0, control                    \n"
+        " tst r0, #1                         \n"
+        " bne MPU_xEventGroupSetBits_Unpriv  \n"
+        " MPU_xEventGroupSetBits_Priv:       \n"
+        " pop {r0}                           \n"
+        " b MPU_xEventGroupSetBitsImpl       \n"
+        " MPU_xEventGroupSetBits_Unpriv:     \n"
+        " pop {r0}                           \n"
+        " svc %0                             \n"
+        " bl MPU_xEventGroupSetBitsImpl      \n"
+        " svc %1                             \n"
+        " bx lr                              \n"
+        "                                    \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                 const EventBits_t uxBitsToSet,
+                                 const EventBits_t uxBitsToWaitFor,
+                                 TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xEventGroupSyncImpl directly; an unprivileged caller traps
+ * via SVC to run the Impl on the privileged system call stack, then traps
+ * again to exit. r0 is saved around the CONTROL read. All four arguments fit
+ * in r0-r3, so the plain ENTER SVC number is used. */
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                 const EventBits_t uxBitsToSet,
+                                 const EventBits_t uxBitsToWaitFor,
+                                 TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                 \n"
+        " .extern MPU_xEventGroupSyncImpl \n"
+        "                                 \n"
+        " push {r0}                       \n"
+        " mrs r0, control                 \n"
+        " tst r0, #1                      \n"
+        " bne MPU_xEventGroupSync_Unpriv  \n"
+        " MPU_xEventGroupSync_Priv:       \n"
+        " pop {r0}                        \n"
+        " b MPU_xEventGroupSyncImpl       \n"
+        " MPU_xEventGroupSync_Unpriv:     \n"
+        " pop {r0}                        \n"
+        " svc %0                          \n"
+        " bl MPU_xEventGroupSyncImpl      \n"
+        " svc %1                          \n"
+        " bx lr                           \n"
+        "                                 \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_uxEventGroupGetNumberImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                       \n"
+        " .extern MPU_uxEventGroupGetNumberImpl \n"
+        "                                       \n"
+        " push {r0}                             \n"
+        " mrs r0, control                       \n"
+        " tst r0, #1                            \n"
+        " bne MPU_uxEventGroupGetNumber_Unpriv  \n"
+        " MPU_uxEventGroupGetNumber_Priv:       \n"
+        " pop {r0}                              \n"
+        " b MPU_uxEventGroupGetNumberImpl       \n"
+        " MPU_uxEventGroupGetNumber_Unpriv:     \n"
+        " pop {r0}                              \n"
+        " svc %0                                \n"
+        " bl MPU_uxEventGroupGetNumberImpl      \n"
+        " svc %1                                \n"
+        " bx lr                                 \n"
+        "                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+                               UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_vEventGroupSetNumberImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+                               UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                      \n"
+        " .extern MPU_vEventGroupSetNumberImpl \n"
+        "                                      \n"
+        " push {r0}                            \n"
+        " mrs r0, control                      \n"
+        " tst r0, #1                           \n"
+        " bne MPU_vEventGroupSetNumber_Unpriv  \n"
+        " MPU_vEventGroupSetNumber_Priv:       \n"
+        " pop {r0}                             \n"
+        " b MPU_vEventGroupSetNumberImpl       \n"
+        " MPU_vEventGroupSetNumber_Unpriv:     \n"
+        " pop {r0}                             \n"
+        " svc %0                               \n"
+        " bl MPU_vEventGroupSetNumberImpl      \n"
+        " svc %1                               \n"
+        " bx lr                                \n"
+        "                                      \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                              const void * pvTxData,
+                              size_t xDataLengthBytes,
+                              TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xStreamBufferSendImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                              const void * pvTxData,
+                              size_t xDataLengthBytes,
+                              TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                   \n"
+        " .extern MPU_xStreamBufferSendImpl \n"
+        "                                   \n"
+        " push {r0}                         \n"
+        " mrs r0, control                   \n"
+        " tst r0, #1                        \n"
+        " bne MPU_xStreamBufferSend_Unpriv  \n"
+        " MPU_xStreamBufferSend_Priv:       \n"
+        " pop {r0}                          \n"
+        " b MPU_xStreamBufferSendImpl       \n"
+        " MPU_xStreamBufferSend_Unpriv:     \n"
+        " pop {r0}                          \n"
+        " svc %0                            \n"
+        " bl MPU_xStreamBufferSendImpl      \n"
+        " svc %1                            \n"
+        " bx lr                             \n"
+        "                                   \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                                 void * pvRxData,
+                                 size_t xBufferLengthBytes,
+                                 TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Privilege-dispatch wrapper: a privileged caller ( CONTROL nPRIV bit clear )
+ * tail-calls MPU_xStreamBufferReceiveImpl directly; an unprivileged caller
+ * traps via SVC to run the Impl on the privileged system call stack, then
+ * traps again to exit. r0 is saved around the CONTROL read. */
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                                 void * pvRxData,
+                                 size_t xBufferLengthBytes,
+                                 TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                      \n"
+        " .extern MPU_xStreamBufferReceiveImpl \n"
+        "                                      \n"
+        " push {r0}                            \n"
+        " mrs r0, control                      \n"
+        " tst r0, #1                           \n"
+        " bne MPU_xStreamBufferReceive_Unpriv  \n"
+        " MPU_xStreamBufferReceive_Priv:       \n"
+        " pop {r0}                             \n"
+        " b MPU_xStreamBufferReceiveImpl       \n"
+        " MPU_xStreamBufferReceive_Unpriv:     \n"
+        " pop {r0}                             \n"
+        " svc %0                               \n"
+        " bl MPU_xStreamBufferReceiveImpl      \n"
+        " svc %1                               \n"
+        " bx lr                                \n"
+        "                                      \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c
index a78529d..504b6bf 100644
--- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c
+++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c
@@ -40,6 +40,88 @@
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        "    stmia r2, {r4-r11}                     \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        "    stmia r2, {r4-r11}                     \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        "    stmia r2, {r4-r11}                     \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -50,80 +132,23 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r3, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #4 \n"/* r3 = 4. */
- " str r3, [r2] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #8 \n"/* r3 = 8. */
- " str r3, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #12 \n"/* r3 = 12. */
- " str r3, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " msr control, r2 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r2 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
+ " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
+ " movs r1, #2 \n"/* r1 = 2. */
+ " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n"/* Discard everything up to r0. */
+ " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
+ " bx r2 \n"/* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -231,6 +256,129 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " \n"
+ " save_general_regs: \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " stmia r1!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psplim \n" /* r3 = PSPLIM. */
+ " mrs r4, control \n" /* r4 = CONTROL. */
+ " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+        "    msr basepri, r0                        \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        "    stmia r2, {r4-r11}                     \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        "    stmia r2, {r4-r11}                     \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        "    stmia r2, {r4-r11}                     \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " restore_context_done: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -238,21 +386,16 @@
" .syntax unified \n"
" \n"
" mrs r0, psp \n"/* Read PSP in r0. */
+ " \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r2, control \n"/* r2 = CONTROL. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
- #else /* configENABLE_MPU */
- " mrs r2, psplim \n"/* r2 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
- #endif /* configENABLE_MPU */
+ " \n"
+ " mrs r2, psplim \n"/* r2 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
@@ -270,52 +413,7 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */
- " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r3, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #4 \n"/* r3 = 4. */
- " str r3, [r2] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #8 \n"/* r3 = 8. */
- " str r3, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #12 \n"/* r3 = 12. */
- " str r3, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
- #else /* configENABLE_MPU */
- " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
- #endif /* configENABLE_MPU */
+ " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
@@ -323,28 +421,66 @@
" vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
- #if ( configENABLE_MPU == 1 )
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r2 \n"/* Restore the CONTROL register value for the task. */
- #else /* configENABLE_MPU */
- " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
- #endif /* configENABLE_MPU */
+ " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "beq syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_enter_1 \n"
+ "cmp r2, %2 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_enter_1: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter_1 \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -362,4 +498,6 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/mpu_wrappers_v2_asm.S b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..867642b
--- /dev/null
+++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1623 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0, r1}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0, r1}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0, r1}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0, r1}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0, r1}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0, r1}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0, r1}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0, r1}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0, r1}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0, r1}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0, r1}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0, r1}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0, r1}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0, r1}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0, r1}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0, r1}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0, r1}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0, r1}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0, r1}
+ /* This function can also be called from an ISR and therefore needs a check
+ * to take the privileged path when called from an ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0, r1}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0, r1}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portasm.s
index fffed8d..648ae00 100644
--- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portasm.s
+++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portasm.s
@@ -33,12 +33,21 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN xSecureContext
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
EXTERN SecureContext_SaveContext
EXTERN SecureContext_LoadContext
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -98,65 +107,99 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ subs r2, #20
+ ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */
+ subs r2, #20
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ mov lr, r6
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+
+ restore_general_regs_first_task:
+ subs r2, #32
+ ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r3!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */
+ ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */
+ stmia r3!, {r4-r7} /* Copy rest half of the hardware saved context on the task stack. */
+ subs r2, #48
+ ldmia r2!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r2, #32
+ ldmia r2!, {r4-r7} /* Restore r4-r7. */
+ subs r2, #16
+
+ restore_context_done_first_task:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r3, [r2] /* Read pxCurrentTCB. */
ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- movs r5, #1 /* r5 = 1. */
- bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- movs r5, #4 /* r5 = 4. */
- str r5, [r2] /* Program RNR = 4. */
- ldmia r3!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */
- ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */
- stmia r4!, {r6,r7} /* Write first set of RBAR/RLAR registers. */
- movs r5, #5 /* r5 = 5. */
- str r5, [r2] /* Program RNR = 5. */
- ldmia r3!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */
- ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */
- stmia r4!, {r6,r7} /* Write second set of RBAR/RLAR registers. */
- movs r5, #6 /* r5 = 6. */
- str r5, [r2] /* Program RNR = 6. */
- ldmia r3!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */
- ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */
- stmia r4!, {r6,r7} /* Write third set of RBAR/RLAR registers. */
- movs r5, #7 /* r5 = 7. */
- str r5, [r2] /* Program RNR = 7. */
- ldmia r3!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */
- ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */
- stmia r4!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- movs r5, #1 /* r5 = 1. */
- orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- ldr r5, =xSecureContext
- str r1, [r5] /* Set xSecureContext to this task's value for the same. */
- msr psplim, r2 /* Set this task's PSPLIM value. */
- msr control, r3 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- bx r4 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
ldr r4, =xSecureContext
str r1, [r4] /* Set xSecureContext to this task's value for the same. */
@@ -167,6 +210,7 @@
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r3 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -199,6 +243,149 @@
msr PRIMASK, r0
bx lr
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/
+ ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */
+
+ cbz r0, save_ns_context /* No secure context to save. */
+ save_s_context:
+ push {r0-r2, lr}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r3} /* LR is now in r3. */
+ mov lr, r3 /* Restore LR. */
+
+ save_ns_context:
+ mov r3, lr /* r3 = LR (EXC_RETURN). */
+ lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ save_general_regs:
+ mrs r3, psp
+ stmia r2!, {r4-r7} /* Store r4-r7. */
+ mov r4, r8 /* r4 = r8. */
+ mov r5, r9 /* r5 = r9. */
+ mov r6, r10 /* r6 = r10. */
+ mov r7, r11 /* r7 = r11. */
+ stmia r2!, {r4-r7} /* Store r8-r11. */
+ ldmia r3!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */
+ stmia r2!, {r4-r7} /* Store the hardware saved context. */
+ ldmia r3!, {r4-r7} /* Copy rest half of the hardware saved context into r4-r7. */
+ stmia r2!, {r4-r7} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psp /* r3 = PSP. */
+ mrs r4, psplim /* r4 = PSPLIM. */
+ mrs r5, control /* r5 = CONTROL. */
+ mov r6, lr /* r6 = LR. */
+ stmia r2!, {r0, r3-r6} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ cpsid i
+ bl vTaskSwitchContext
+ cpsie i
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ subs r2, #20
+ ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */
+ subs r2, #20
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ mov lr, r6
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+ cbz r0, restore_ns_context /* No secure context to restore. */
+
+ restore_s_context:
+ push {r1-r3, lr}
+ bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r1-r4} /* LR is now in r4. */
+ mov lr, r4
+
+ restore_ns_context:
+ mov r0, lr /* r0 = LR (EXC_RETURN). */
+ lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ restore_general_regs:
+ subs r2, #32
+ ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r3!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */
+ ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */
+ stmia r3!, {r4-r7} /* Copy rest half of the hardware saved context on the task stack. */
+ subs r2, #48
+ ldmia r2!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r2, #32
+ ldmia r2!, {r4-r7} /* Restore r4-r7. */
+ subs r2, #16
+
+ restore_context_done:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
PendSV_Handler:
ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
@@ -216,41 +403,18 @@
bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r3] /* Read pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
-#else /* configENABLE_MPU */
+
subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
mrs r1, psplim /* r1 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
-#endif /* configENABLE_MPU */
+
b select_next_task
save_ns_context:
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r3] /* Read pxCurrentTCB. */
- #if ( configENABLE_MPU == 1 )
- subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- adds r2, r2, #16 /* r2 = r2 + 16. */
- stmia r2!, {r4-r7} /* Store the low registers that are not saved automatically. */
- mov r4, r8 /* r4 = r8. */
- mov r5, r9 /* r5 = r9. */
- mov r6, r10 /* r6 = r10. */
- mov r7, r11 /* r7 = r11. */
- stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- subs r2, r2, #48 /* r2 = r2 - 48. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
mrs r1, psplim /* r1 = PSPLIM. */
@@ -261,7 +425,6 @@
mov r6, r10 /* r6 = r10. */
mov r7, r11 /* r7 = r11. */
stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */
- #endif /* configENABLE_MPU */
select_next_task:
cpsid i
@@ -272,68 +435,6 @@
ldr r1, [r3] /* Read pxCurrentTCB. */
ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
- #if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- movs r5, #1 /* r5 = 1. */
- bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
- str r4, [r3] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r3] /* Program MAIR0. */
- ldr r4, =0xe000ed98 /* r4 = 0xe000ed98 [Location of RNR]. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- movs r5, #4 /* r5 = 4. */
- str r5, [r4] /* Program RNR = 4. */
- ldmia r1!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r6,r7} /* Write first set of RBAR/RLAR registers. */
- movs r5, #5 /* r5 = 5. */
- str r5, [r4] /* Program RNR = 5. */
- ldmia r1!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r6,r7} /* Write second set of RBAR/RLAR registers. */
- movs r5, #6 /* r5 = 6. */
- str r5, [r4] /* Program RNR = 6. */
- ldmia r1!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r6,r7} /* Write third set of RBAR/RLAR registers. */
- movs r5, #7 /* r5 = 7. */
- str r5, [r4] /* Program RNR = 7. */
- ldmia r1!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */
-
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- movs r5, #1 /* r5 = 1. */
- orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
- str r4, [r3] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
-
- #if ( configENABLE_MPU == 1 )
- ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r3 /* Restore the CONTROL register value for the task. */
- mov lr, r4 /* LR = r4. */
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- str r0, [r3] /* Restore the task's xSecureContext. */
- cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- push {r2, r4}
- bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r2, r4}
- mov lr, r4 /* LR = r4. */
- lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- msr psp, r2 /* Remember the new top of stack for the task. */
- bx lr
- #else /* configENABLE_MPU */
ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
msr psplim, r1 /* Restore the PSPLIM register value for the task. */
mov lr, r4 /* LR = r4. */
@@ -350,7 +451,6 @@
bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
msr psp, r2 /* Remember the new top of stack for the task. */
bx lr
- #endif /* configENABLE_MPU */
restore_ns_context:
adds r2, r2, #16 /* Move to the high registers. */
@@ -363,8 +463,45 @@
subs r2, r2, #32 /* Go back to the low registers. */
ldmia r2!, {r4-r7} /* Restore the low registers that are not automatically restored. */
bx lr
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ movs r0, #4 /* r0 = 4 = mask for bit[2] (SPSEL) of EXC_RETURN. */
+ mov r1, lr /* r1 = EXC_RETURN value. */
+ tst r0, r1 /* Bit[2] of EXC_RETURN is 0 if MSP was used to stack the exception frame. */
+ beq stack_on_msp
+ stack_on_psp:
+ mrs r0, psp /* r0 = address of the exception frame on the process stack. */
+ b route_svc
+ stack_on_msp:
+ mrs r0, msp /* r0 = address of the exception frame on the main stack. */
+ b route_svc
+
+ route_svc:
+ ldr r2, [r0, #24] /* r2 = stacked return address (PC) - offset 24 in the exception frame. */
+ subs r2, #2 /* Point r2 at the 2-byte SVC instruction that raised this exception. */
+ ldrb r3, [r2, #0] /* r3 = SVC number (immediate byte of the SVC instruction). */
+ cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq system_call_enter
+ cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq system_call_enter_1
+ cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq system_call_exit
+ b vPortSVCHandler_C /* All other SVC numbers go to the generic C handler. */
+
+ system_call_enter:
+ b vSystemCallEnter /* Tail-call the C handler for system call entry. */
+ system_call_enter_1:
+ b vSystemCallEnter_1 /* Tail-call the C handler for 5-parameter system call entry. */
+ system_call_exit:
+ b vSystemCallExit /* Tail-call the C handler for system call exit. */
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
movs r0, #4
mov r1, lr
@@ -375,6 +512,8 @@
stacking_used_msp:
mrs r0, msp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
vPortFreeSecureContext:
diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..867642b
--- /dev/null
+++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1623 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0, r1} /* Preserve the caller's arguments across the privilege check below. */
+ mrs r0, control
+ movs r1, #1 /* r1 = 1 = mask for the nPRIV bit of the CONTROL register. */
+ tst r0, r1 /* nPRIV set means the caller is running unprivileged. */
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0, r1} /* Restore the arguments. */
+ b MPU_xTaskDelayUntilImpl /* Privileged caller - tail-call the implementation directly. */
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0, r1} /* Restore the arguments. */
+ svc #portSVC_SYSTEM_CALL_ENTER /* Enter the system call through the SVC handler. */
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT /* Exit the system call through the SVC handler. */
+ bx lr
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0, r1}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0, r1}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0, r1}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0, r1}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0, r1}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0, r1}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0, r1}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0, r1}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0, r1}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0, r1}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0, r1}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0, r1}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0, r1}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0, r1}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0, r1}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0, r1}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0, r1}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0, r1} /* Preserve the caller's arguments across the checks below. */
+ /* This function can also be called from an ISR and therefore needs a check
+ * to take the privileged path when called from an ISR. */
+ mrs r0, ipsr /* IPSR is non-zero when executing in an exception (ISR) context. */
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ movs r1, #1 /* r1 = 1 = mask for the nPRIV bit of the CONTROL register. */
+ tst r0, r1 /* nPRIV clear means the caller is running privileged. */
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0, r1} /* Restore the arguments. */
+ svc #portSVC_SYSTEM_CALL_ENTER_1 /* ENTER_1 - this system call takes 5 parameters. */
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT /* Exit the system call through the SVC handler. */
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0, r1} /* Restore the arguments. */
+ b MPU_xTimerGenericCommandImpl /* Privileged/ISR caller - tail-call the implementation. */
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupClearBitsImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupSetBitsImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupSyncImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0, r1}
+ b MPU_uxEventGroupGetNumberImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0, r1}
+ b MPU_vEventGroupSetNumberImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSendImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferReceiveImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferIsFullImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferIsEmptyImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSpacesAvailableImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferBytesAvailableImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSetTriggerLevelImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* Unprivileged if CONTROL.nPRIV (bit 0) is set. */
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl /* Privileged caller - invoke the implementation directly. */
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Unprivileged caller - enter the privileged system call path. */
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portasm.s
index 62bd387..8f77c4d 100644
--- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portasm.s
+++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portasm.s
@@ -32,9 +32,18 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -88,63 +97,97 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ subs r1, #16 /* Move r1 back to the start of the 4 saved special registers. */
+ ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ subs r1, #16 /* Rewind r1 to the start of the special registers again. */
+ msr psp, r2 /* Restore the task's PSP. */
+ msr psplim, r3 /* Restore the task's stack limit. */
+ msr control, r4 /* Restore the task's CONTROL (privilege) value. */
+ mov lr, r5 /* Restore EXC_RETURN. */
+
+ restore_general_regs_first_task:
+ subs r1, #32 /* Move r1 back to the start of the saved hardware frame. */
+ ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */
+ ldmia r1!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */
+ subs r1, #48 /* Move r1 back to the start of the saved r8-r11. */
+ ldmia r1!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r1, #32 /* Move r1 back to the start of the saved r4-r7. */
+ ldmia r1!, {r4-r7} /* Restore r4-r7. */
+ subs r1, #16 /* r1 now points to the start of the saved context area. */
+
+ restore_context_done_first_task:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- str r3, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r2] /* Program RNR = 4. */
- ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */
- movs r4, #5 /* r4 = 5. */
- str r4, [r2] /* Program RNR = 5. */
- ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */
- movs r4, #6 /* r4 = 6. */
- str r4, [r2] /* Program RNR = 6. */
- ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */
- movs r4, #7 /* r4 = 7. */
- str r4, [r2] /* Program RNR = 7. */
- ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
- str r3, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- msr psplim, r1 /* Set this task's PSPLIM value. */
- msr control, r2 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- bx r3 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
@@ -153,6 +196,7 @@
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r2 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -187,23 +231,127 @@
bx lr
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */
+ mrs r2, psp /* r2 = PSP. */
+
+ save_general_regs:
+ stmia r1!, {r4-r7} /* Store r4-r7. */
+ mov r4, r8 /* r4 = r8. */
+ mov r5, r9 /* r5 = r9. */
+ mov r6, r10 /* r6 = r10. */
+ mov r7, r11 /* r7 = r11. */
+ stmia r1!, {r4-r7} /* Store r8-r11. */
+ ldmia r2!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */
+ stmia r1!, {r4-r7} /* Store the hardware saved context. */
+ ldmia r2!, {r4-r7} /* Copy rest half of the hardware saved context into r4-r7. */
+ stmia r1!, {r4-r7} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r2, psp /* r2 = PSP. */
+ mrs r3, psplim /* r3 = PSPLIM. */
+ mrs r4, control /* r4 = CONTROL. */
+ mov r5, lr /* r5 = LR. */
+ stmia r1!, {r2-r5} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ cpsid i /* Mask interrupts while vTaskSwitchContext updates pxCurrentTCB. */
+ bl vTaskSwitchContext
+ cpsie i /* Re-enable interrupts. */
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ subs r1, #16 /* Move r1 back to the start of the 4 saved special registers. */
+ ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ subs r1, #16 /* Rewind r1 to the start of the special registers again. */
+ msr psp, r2 /* Restore the task's PSP. */
+ msr psplim, r3 /* Restore the task's stack limit. */
+ msr control, r4 /* Restore the task's CONTROL (privilege) value. */
+ mov lr, r5 /* Restore EXC_RETURN. */
+
+ restore_general_regs:
+ subs r1, #32 /* Move r1 back to the start of the saved hardware frame. */
+ ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */
+ ldmia r1!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */
+ subs r1, #48 /* Move r1 back to the start of the saved r8-r11. */
+ ldmia r1!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r1, #32 /* Move r1 back to the start of the saved r4-r7. */
+ ldmia r1!, {r4-r7} /* Restore r4-r7. */
+ subs r1, #16 /* r1 now points to the start of the saved context area. */
+
+ restore_context_done:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
PendSV_Handler:
mrs r0, psp /* Read PSP in r0. */
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- subs r0, r0, #44 /* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- str r0, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r2, control /* r2 = CONTROL. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmia r0!, {r1-r7} /* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */
- mov r4, r8 /* r4 = r8. */
- mov r5, r9 /* r5 = r9. */
- mov r6, r10 /* r6 = r10. */
- mov r7, r11 /* r7 = r11. */
- stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */
-#else /* configENABLE_MPU */
+
subs r0, r0, #40 /* Make space for PSPLIM, LR and the remaining registers on the stack. */
str r0, [r1] /* Save the new top of stack in TCB. */
mrs r2, psplim /* r2 = PSPLIM. */
@@ -214,7 +362,6 @@
mov r6, r10 /* r6 = r10. */
mov r7, r11 /* r7 = r11. */
stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */
-#endif /* configENABLE_MPU */
cpsid i
bl vTaskSwitchContext
@@ -224,63 +371,6 @@
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- str r3, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r2] /* Program RNR = 4. */
- ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */
- movs r4, #5 /* r4 = 5. */
- str r4, [r2] /* Program RNR = 5. */
- ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */
- movs r4, #6 /* r4 = 6. */
- str r4, [r2] /* Program RNR = 6. */
- ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */
- movs r4, #7 /* r4 = 7. */
- str r4, [r2] /* Program RNR = 7. */
- ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
- str r3, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- adds r0, r0, #28 /* Move to the high registers. */
- ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */
- mov r8, r4 /* r8 = r4. */
- mov r9, r5 /* r9 = r5. */
- mov r10, r6 /* r10 = r6. */
- mov r11, r7 /* r11 = r7. */
- msr psp, r0 /* Remember the new top of stack for the task. */
- subs r0, r0, #44 /* Move to the starting of the saved context. */
- ldmia r0!, {r1-r7} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r2 /* Restore the CONTROL register value for the task. */
- bx r3
-#else /* configENABLE_MPU */
adds r0, r0, #24 /* Move to the high registers. */
ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */
mov r8, r4 /* r8 = r4. */
@@ -292,9 +382,45 @@
ldmia r0!, {r2-r7} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
bx r3
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ movs r0, #4 /* r0 = 4 i.e. mask for bit 2 of EXC_RETURN (SPSEL). */
+ mov r1, lr /* r1 = EXC_RETURN. */
+ tst r0, r1 /* Bit 2 of EXC_RETURN indicates which stack was in use before the exception. */
+ beq stack_on_msp
+ stack_on_psp:
+ mrs r0, psp /* Caller was using the process stack - r0 = exception frame on PSP. */
+ b route_svc
+ stack_on_msp:
+ mrs r0, msp /* Caller was using the main stack - r0 = exception frame on MSP. */
+ b route_svc
+
+ route_svc:
+ ldr r2, [r0, #24] /* r2 = stacked return address (PC) from the exception frame. */
+ subs r2, #2 /* Point r2 at the SVC instruction that caused this exception. */
+ ldrb r3, [r2, #0] /* r3 = SVC number (immediate byte of the SVC instruction). */
+ cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq system_call_enter
+ cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq system_call_enter_1
+ cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq system_call_exit
+ b vPortSVCHandler_C /* All other SVC numbers go to the generic C handler. */
+
+ system_call_enter:
+ b vSystemCallEnter
+ system_call_enter_1:
+ b vSystemCallEnter_1
+ system_call_exit:
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
movs r0, #4
mov r1, lr
@@ -305,6 +431,8 @@
stacking_used_msp:
mrs r0, msp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
END
diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/mpu_wrappers_v2_asm.S b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..f051a60
--- /dev/null
+++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1552 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0}
+ /* This function can also be called from an ISR and, therefore, we need a
+ * check to take the privileged path when it is called from an ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s
index a193cd7..15e74ff 100644
--- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s
+++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s
@@ -32,12 +32,21 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN xSecureContext
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
EXTERN SecureContext_SaveContext
EXTERN SecureContext_LoadContext
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -89,50 +98,81 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+        orr r2, #1                               /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+
+ restore_general_regs_first_task:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r3, [r2] /* Read pxCurrentTCB. */
ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r2] /* Program RNR = 4. */
- adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- ldr r5, =xSecureContext
- str r1, [r5] /* Set xSecureContext to this task's value for the same. */
- msr psplim, r2 /* Set this task's PSPLIM value. */
- msr control, r3 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r4 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
ldr r4, =xSecureContext
str r1, [r4] /* Set xSecureContext to this task's value for the same. */
@@ -145,6 +185,7 @@
mov r0, #0
msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
bx r3 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -183,6 +224,143 @@
bx lr /* Return. */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */
+
+ cbz r0, save_ns_context /* No secure context to save. */
+ save_s_context:
+ push {r0-r2, lr}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r2, lr}
+
+ save_ns_context:
+ mov r3, lr /* r3 = LR (EXC_RETURN). */
+ lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ save_general_regs:
+ mrs r3, psp
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r2!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */
+ sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r2!, {r4-r11} /* Store r4-r11. */
+ ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r2!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psp /* r3 = PSP. */
+ mrs r4, psplim /* r4 = PSPLIM. */
+ mrs r5, control /* r5 = CONTROL. */
+ stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+        msr basepri, r0                          /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+ cbz r0, restore_ns_context /* No secure context to restore. */
+
+ restore_s_context:
+ push {r1-r3, lr}
+ bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r1-r3, lr}
+
+ restore_ns_context:
+ mov r0, lr /* r0 = LR (EXC_RETURN). */
+ lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ restore_general_regs:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
PendSV_Handler:
ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
@@ -200,20 +378,11 @@
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r3] /* Read pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
-#else /* configENABLE_MPU */
subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
mrs r1, psplim /* r1 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
-#endif /* configENABLE_MPU */
b select_next_task
save_ns_context:
@@ -224,17 +393,6 @@
it eq
vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- adds r2, r2, #16 /* r2 = r2 + 16. */
- stm r2, {r4-r11} /* Store the registers that are not saved automatically. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- subs r2, r2, #16 /* r2 = r2 - 16. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
adds r2, r2, #12 /* r2 = r2 + 12. */
@@ -243,7 +401,6 @@
mov r3, lr /* r3 = LR/EXC_RETURN. */
subs r2, r2, #12 /* r2 = r2 - 12. */
stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
select_next_task:
mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
@@ -258,51 +415,6 @@
ldr r1, [r3] /* Read pxCurrentTCB. */
ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
- #if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r3] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r3] /* Program MAIR0. */
- ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r3] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r3] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
-
- #if ( configENABLE_MPU == 1 )
- ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r3 /* Restore the CONTROL register value for the task. */
- mov lr, r4 /* LR = r4. */
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- str r0, [r3] /* Restore the task's xSecureContext. */
- cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- push {r2, r4}
- bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r2, r4}
- mov lr, r4 /* LR = r4. */
- lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- msr psp, r2 /* Remember the new top of stack for the task. */
- bx lr
- #else /* configENABLE_MPU */
ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
msr psplim, r1 /* Restore the PSPLIM register value for the task. */
mov lr, r4 /* LR = r4. */
@@ -319,7 +431,6 @@
bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
msr psp, r2 /* Remember the new top of stack for the task. */
bx lr
- #endif /* configENABLE_MPU */
restore_ns_context:
ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */
@@ -330,14 +441,50 @@
#endif /* configENABLE_FPU || configENABLE_MVE */
msr psp, r2 /* Remember the new top of stack for the task. */
bx lr
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq syscall_enter
+ cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq syscall_enter_1
+ cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_enter_1:
+ mov r1, lr
+ b vSystemCallEnter_1
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
vPortFreeSecureContext:
diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..f051a60
--- /dev/null
+++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1552 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0}
+ /* This function can be called from ISR also and therefore, we need a check
+ * to take privileged path, if called from ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s
index 581b84d..ec52025 100644
--- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s
+++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s
@@ -32,9 +32,18 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -79,48 +88,79 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs_first_task:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context onto the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r3, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r3, #4 /* r3 = 4. */
- str r3, [r2] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- msr psplim, r1 /* Set this task's PSPLIM value. */
- msr control, r2 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r3 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
@@ -131,6 +171,7 @@
mov r0, #0
msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
bx r2 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -169,6 +210,114 @@
bx lr /* Return. */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */
+ mrs r2, psp /* r2 = PSP. */
+
+ save_general_regs:
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r1!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */
+ sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r1!, {r4-r11} /* Store r4-r11. */
+ ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r1!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psplim /* r3 = PSPLIM. */
+ mrs r4, control /* r4 = CONTROL. */
+ stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context onto the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context onto the task stack. */
+ vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
PendSV_Handler:
mrs r0, psp /* Read PSP in r0. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@@ -176,16 +325,10 @@
it eq
vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
-#if ( configENABLE_MPU == 1 )
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r2, control /* r2 = CONTROL. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
-#else /* configENABLE_MPU */
+
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */
-#endif /* configENABLE_MPU */
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
@@ -203,37 +346,7 @@
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r3, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r3, #4 /* r3 = 4. */
- str r3, [r2] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
-#else /* configENABLE_MPU */
ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
-#endif /* configENABLE_MPU */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
@@ -241,22 +354,53 @@
vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r2 /* Restore the CONTROL register value for the task. */
-#else /* configENABLE_MPU */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
-#endif /* configENABLE_MPU */
msr psp, r0 /* Remember the new top of stack for the task. */
bx r3
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq syscall_enter
+ cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq syscall_enter_1
+ cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_enter_1:
+ mov r1, lr
+ b vSystemCallEnter_1
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
END
diff --git a/portable/ARMv8M/non_secure/portmacrocommon.h b/portable/ARMv8M/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/ARMv8M/non_secure/portmacrocommon.h
+++ b/portable/ARMv8M/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/Common/mpu_wrappers.c b/portable/Common/mpu_wrappers.c
index 92841e1..c995195 100644
--- a/portable/Common/mpu_wrappers.c
+++ b/portable/Common/mpu_wrappers.c
@@ -48,7 +48,7 @@
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
/*-----------------------------------------------------------*/
-#if ( portUSING_MPU_WRAPPERS == 1 )
+#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
BaseType_t MPU_xTaskCreate( TaskFunction_t pvTaskCode,
@@ -2537,5 +2537,5 @@
#endif
/*-----------------------------------------------------------*/
-#endif /* portUSING_MPU_WRAPPERS == 1 */
+#endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) */
/*-----------------------------------------------------------*/
diff --git a/portable/Common/mpu_wrappers_v2.c b/portable/Common/mpu_wrappers_v2.c
new file mode 100644
index 0000000..1e28d8e
--- /dev/null
+++ b/portable/Common/mpu_wrappers_v2.c
@@ -0,0 +1,4121 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/*
+ * Implementation of the wrapper functions used to raise the processor privilege
+ * before calling a standard FreeRTOS API function.
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+#include "mpu_prototypes.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ #ifndef configPROTECTED_KERNEL_OBJECT_POOL_SIZE
+ #error configPROTECTED_KERNEL_OBJECT_POOL_SIZE must be defined to maximum number of kernel objects in the application.
+ #endif
+
+/**
+ * @brief Offset added to the index before returning to the user.
+ *
+ * If the actual handle is stored at index i, ( i + INDEX_OFFSET )
+ * is returned to the user.
+ */
+ #define INDEX_OFFSET 1
+
+/**
+ * @brief Opaque type for a kernel object.
+ */
+ struct OpaqueObject;
+ typedef struct OpaqueObject * OpaqueObjectHandle_t;
+
+/**
+ * @brief Defines kernel object in the kernel object pool.
+ */
+ typedef struct KernelObject
+ {
+ OpaqueObjectHandle_t xInternalObjectHandle;
+ uint32_t ulKernelObjectType;
+ void * pvKernelObjectData;
+ } KernelObject_t;
+
+/**
+ * @brief Kernel object types.
+ */
+ #define KERNEL_OBJECT_TYPE_INVALID ( 0UL )
+ #define KERNEL_OBJECT_TYPE_QUEUE ( 1UL )
+ #define KERNEL_OBJECT_TYPE_TASK ( 2UL )
+ #define KERNEL_OBJECT_TYPE_STREAM_BUFFER ( 3UL )
+ #define KERNEL_OBJECT_TYPE_EVENT_GROUP ( 4UL )
+ #define KERNEL_OBJECT_TYPE_TIMER ( 5UL )
+
+/**
+ * @brief Checks whether an external index is valid or not.
+ */
+ #define IS_EXTERNAL_INDEX_VALID( lIndex ) \
+ ( ( ( lIndex ) >= INDEX_OFFSET ) && \
+ ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE + INDEX_OFFSET ) ) )
+
+/**
+ * @brief Checks whether an internal index is valid or not.
+ */
+ #define IS_INTERNAL_INDEX_VALID( lIndex ) \
+ ( ( ( lIndex ) >= 0 ) && \
+ ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE ) ) )
+
+/**
+ * @brief Converts an internal index into external.
+ */
+ #define CONVERT_TO_EXTERNAL_INDEX( lIndex ) ( ( lIndex ) + INDEX_OFFSET )
+
+/**
+ * @brief Converts an external index into internal.
+ */
+ #define CONVERT_TO_INTERNAL_INDEX( lIndex ) ( ( lIndex ) - INDEX_OFFSET )
+
+/**
+ * @brief Get the index of a free slot in the kernel object pool.
+ *
+ * If a free slot is found, this function marks the slot as
+ * "not free".
+ *
+ * @return Index of a free slot is returned, if a free slot is
+ * found. Otherwise -1 is returned.
+ */
+ static int32_t MPU_GetFreeIndexInKernelObjectPool( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Set the given index as free in the kernel object pool.
+ *
+ * @param lIndex The index to set as free.
+ */
+ static void MPU_SetIndexFreeInKernelObjectPool( int32_t lIndex ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Get the index at which a given kernel object is stored.
+ *
+ * @param xHandle The given kernel object handle.
+ * @param ulKernelObjectType The kernel object type.
+ *
+ * @return Index at which the kernel object is stored if it is a valid
+ * handle, -1 otherwise.
+ */
+ static int32_t MPU_GetIndexForHandle( OpaqueObjectHandle_t xHandle,
+ uint32_t ulKernelObjectType ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Store the given kernel object handle at the given index in
+ * the kernel object pool.
+ *
+ * @param lIndex Index to store the given handle at.
+ * @param xHandle Kernel object handle to store.
+ * @param pvKernelObjectData The data associated with the kernel object.
+ * Currently, only used for timer objects to store timer callback.
+ * @param ulKernelObjectType The kernel object type.
+ */
+ static void MPU_StoreHandleAndDataAtIndex( int32_t lIndex,
+ OpaqueObjectHandle_t xHandle,
+ void * pvKernelObjectData,
+ uint32_t ulKernelObjectType ) PRIVILEGED_FUNCTION;
+
+/**
+ * @brief Get the kernel object handle at the given index from
+ * the kernel object pool.
+ *
+ * @param lIndex Index at which to get the kernel object handle.
+ * @param ulKernelObjectType The kernel object type.
+ *
+ * @return The kernel object handle at the index.
+ */
+ static OpaqueObjectHandle_t MPU_GetHandleAtIndex( int32_t lIndex,
+ uint32_t ulKernelObjectType ) PRIVILEGED_FUNCTION;
+
+ #if ( configUSE_TIMERS == 1 )
+
+/**
+ * @brief The function registered as callback for all the timers.
+ *
+ * We intercept all the timer callbacks so that we can call application
+ * callbacks with opaque handle.
+ *
+ * @param xInternalHandle The internal timer handle.
+ */
+ static void MPU_TimerCallback( TimerHandle_t xInternalHandle ) PRIVILEGED_FUNCTION;
+
+ #endif /* #if ( configUSE_TIMERS == 1 ) */
+
+/*
+ * Wrappers to keep all the casting in one place.
+ */
+ #define MPU_StoreQueueHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE )
+ #define MPU_GetQueueHandleAtIndex( lIndex ) ( QueueHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE )
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+ #define MPU_StoreQueueSetHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE )
+ #define MPU_GetQueueSetHandleAtIndex( lIndex ) ( QueueSetHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE )
+ #define MPU_StoreQueueSetMemberHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE )
+ #define MPU_GetQueueSetMemberHandleAtIndex( lIndex ) ( QueueSetMemberHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE )
+ #define MPU_GetIndexForQueueSetMemberHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_QUEUE )
+ #endif
+
+/*
+ * Wrappers to keep all the casting in one place for Task APIs.
+ */
+ #define MPU_StoreTaskHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_TASK )
+ #define MPU_GetTaskHandleAtIndex( lIndex ) ( TaskHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_TASK )
+ #define MPU_GetIndexForTaskHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_TASK )
+
+/*
+ * Wrappers to keep all the casting in one place for Event Group APIs.
+ */
+ #define MPU_StoreEventGroupHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_EVENT_GROUP )
+ #define MPU_GetEventGroupHandleAtIndex( lIndex ) ( EventGroupHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_EVENT_GROUP )
+ #define MPU_GetIndexForEventGroupHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_EVENT_GROUP )
+
+/*
+ * Wrappers to keep all the casting in one place for Stream Buffer APIs.
+ */
+ #define MPU_StoreStreamBufferHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
+ #define MPU_GetStreamBufferHandleAtIndex( lIndex ) ( StreamBufferHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
+ #define MPU_GetIndexForStreamBufferHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
+
+ #if ( configUSE_TIMERS == 1 )
+
+/*
+ * Wrappers to keep all the casting in one place for Timer APIs.
+ */
+ #define MPU_StoreTimerHandleAtIndex( lIndex, xHandle, pxApplicationCallback ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, ( void * ) pxApplicationCallback, KERNEL_OBJECT_TYPE_TIMER )
+ #define MPU_GetTimerHandleAtIndex( lIndex ) ( TimerHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_TIMER )
+ #define MPU_GetIndexForTimerHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_TIMER )
+
+ #endif /* #if ( configUSE_TIMERS == 1 ) */
+
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Kernel object pool.
+ */
+ PRIVILEGED_DATA static KernelObject_t xKernelObjectPool[ configPROTECTED_KERNEL_OBJECT_POOL_SIZE ] = { NULL };
+/*-----------------------------------------------------------*/
+
+ static int32_t MPU_GetFreeIndexInKernelObjectPool( void ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t i, lFreeIndex = -1;
+
+ /* This function is called only from resource create APIs
+ * which are not supposed to be called from ISRs. Therefore,
+ * we only need to suspend the scheduler and do not require
+ * a critical section. */
+ vTaskSuspendAll();
+ {
+ for( i = 0; i < configPROTECTED_KERNEL_OBJECT_POOL_SIZE; i++ )
+ {
+ if( xKernelObjectPool[ i ].xInternalObjectHandle == NULL )
+ {
+ /* Mark this index as not free. */
+ xKernelObjectPool[ i ].xInternalObjectHandle = ( OpaqueObjectHandle_t ) ( ~0 );
+ lFreeIndex = i;
+ break;
+ }
+ }
+ }
+ xTaskResumeAll();
+
+ return lFreeIndex;
+ }
+/*-----------------------------------------------------------*/
+
+ static void MPU_SetIndexFreeInKernelObjectPool( int32_t lIndex ) /* PRIVILEGED_FUNCTION */
+ {
+ configASSERT( IS_INTERNAL_INDEX_VALID( lIndex ) != pdFALSE );
+
+ taskENTER_CRITICAL();
+ {
+ xKernelObjectPool[ lIndex ].xInternalObjectHandle = NULL;
+ xKernelObjectPool[ lIndex ].ulKernelObjectType = KERNEL_OBJECT_TYPE_INVALID;
+ xKernelObjectPool[ lIndex ].pvKernelObjectData = NULL;
+ }
+ taskEXIT_CRITICAL();
+ }
+/*-----------------------------------------------------------*/
+
+ static int32_t MPU_GetIndexForHandle( OpaqueObjectHandle_t xHandle,
+ uint32_t ulKernelObjectType ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t i, lIndex = -1;
+
+ configASSERT( xHandle != NULL );
+
+ for( i = 0; i < configPROTECTED_KERNEL_OBJECT_POOL_SIZE; i++ )
+ {
+ if( ( xKernelObjectPool[ i ].xInternalObjectHandle == xHandle ) &&
+ ( xKernelObjectPool[ i ].ulKernelObjectType == ulKernelObjectType ) )
+ {
+ lIndex = i;
+ break;
+ }
+ }
+
+ return lIndex;
+ }
+/*-----------------------------------------------------------*/
+
+ static void MPU_StoreHandleAndDataAtIndex( int32_t lIndex,
+ OpaqueObjectHandle_t xHandle,
+ void * pvKernelObjectData,
+ uint32_t ulKernelObjectType ) /* PRIVILEGED_FUNCTION */
+ {
+ configASSERT( IS_INTERNAL_INDEX_VALID( lIndex ) != pdFALSE );
+ xKernelObjectPool[ lIndex ].xInternalObjectHandle = xHandle;
+ xKernelObjectPool[ lIndex ].ulKernelObjectType = ulKernelObjectType;
+ xKernelObjectPool[ lIndex ].pvKernelObjectData = pvKernelObjectData;
+ }
+/*-----------------------------------------------------------*/
+
+ static OpaqueObjectHandle_t MPU_GetHandleAtIndex( int32_t lIndex,
+ uint32_t ulKernelObjectType ) /* PRIVILEGED_FUNCTION */
+ {
+ configASSERT( IS_INTERNAL_INDEX_VALID( lIndex ) != pdFALSE );
+ configASSERT( xKernelObjectPool[ lIndex ].ulKernelObjectType == ulKernelObjectType );
+ return xKernelObjectPool[ lIndex ].xInternalObjectHandle;
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ static void MPU_TimerCallback( TimerHandle_t xInternalHandle ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t i, lIndex = -1;
+ TimerHandle_t xExternalHandle = NULL;
+ TimerCallbackFunction_t pxApplicationCallBack = NULL;
+
+ /* Coming from the timer task and, therefore, should be valid. */
+ configASSERT( xInternalHandle != NULL );
+
+ for( i = 0; i < configPROTECTED_KERNEL_OBJECT_POOL_SIZE; i++ )
+ {
+ if( ( ( TimerHandle_t ) xKernelObjectPool[ i ].xInternalObjectHandle == xInternalHandle ) &&
+ ( xKernelObjectPool[ i ].ulKernelObjectType == KERNEL_OBJECT_TYPE_TIMER ) )
+ {
+ lIndex = i;
+ break;
+ }
+ }
+
+ configASSERT( lIndex != -1 );
+ xExternalHandle = ( TimerHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+
+ pxApplicationCallBack = ( TimerCallbackFunction_t ) xKernelObjectPool[ lIndex ].pvKernelObjectData;
+ pxApplicationCallBack( xExternalHandle );
+ }
+
+ #endif /* #if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/*-----------------------------------------------------------*/
+/* MPU wrappers for tasks APIs. */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskDelayUntil == 1 )
+
+ BaseType_t MPU_xTaskDelayUntilImpl( TickType_t * const pxPreviousWakeTime,
+ TickType_t xTimeIncrement ) PRIVILEGED_FUNCTION;
+
+ BaseType_t MPU_xTaskDelayUntilImpl( TickType_t * const pxPreviousWakeTime,
+ TickType_t xTimeIncrement ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ BaseType_t xIsPreviousWakeTimeAccessible = pdFALSE;
+
+ xIsPreviousWakeTimeAccessible = xPortIsAuthorizedToAccessBuffer( pxPreviousWakeTime,
+ sizeof( TickType_t ),
+ ( tskMPU_WRITE_PERMISSION | tskMPU_READ_PERMISSION ) );
+
+ if( xIsPreviousWakeTimeAccessible == pdTRUE )
+ {
+ xReturn = xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement );
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ BaseType_t MPU_xTaskAbortDelayImpl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+ BaseType_t MPU_xTaskAbortDelayImpl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ int32_t lIndex;
+
+ if( xTask == NULL )
+ {
+ xReturn = xTaskAbortDelay( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = xTaskAbortDelay( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskDelay == 1 )
+
+ void MPU_vTaskDelayImpl( TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION;
+
+ void MPU_vTaskDelayImpl( TickType_t xTicksToDelay ) /* PRIVILEGED_FUNCTION */
+ {
+ vTaskDelay( xTicksToDelay );
+ }
+
+ #endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+ UBaseType_t MPU_uxTaskPriorityGetImpl( const TaskHandle_t pxTask ) PRIVILEGED_FUNCTION;
+
+ UBaseType_t MPU_uxTaskPriorityGetImpl( const TaskHandle_t pxTask ) /* PRIVILEGED_FUNCTION */
+ {
+ UBaseType_t uxReturn = configMAX_PRIORITIES;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( pxTask == NULL )
+ {
+ uxReturn = uxTaskPriorityGet( pxTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) pxTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ uxReturn = uxTaskPriorityGet( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return uxReturn;
+ }
+
+ #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_eTaskGetState == 1 )
+
+ eTaskState MPU_eTaskGetStateImpl( TaskHandle_t pxTask ) PRIVILEGED_FUNCTION;
+
+ eTaskState MPU_eTaskGetStateImpl( TaskHandle_t pxTask ) /* PRIVILEGED_FUNCTION */
+ {
+ eTaskState eReturn = eInvalid;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ int32_t lIndex;
+
+ if( pxTask == NULL )
+ {
+ eReturn = eTaskGetState( pxTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) pxTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ eReturn = eTaskGetState( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return eReturn;
+ }
+
+ #endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ void MPU_vTaskGetInfoImpl( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) PRIVILEGED_FUNCTION;
+
+ void MPU_vTaskGetInfoImpl( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+ BaseType_t xIsTaskStatusWriteable = pdFALSE;
+
+ xIsTaskStatusWriteable = xPortIsAuthorizedToAccessBuffer( pxTaskStatus,
+ sizeof( TaskStatus_t ),
+ tskMPU_WRITE_PERMISSION );
+
+ if( xIsTaskStatusWriteable == pdTRUE )
+ {
+ if( xTask == NULL )
+ {
+ vTaskGetInfo( xTask, pxTaskStatus, xGetFreeStackSpace, eState );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskGetInfo( xInternalTaskHandle, pxTaskStatus, xGetFreeStackSpace, eState );
+ }
+ }
+ }
+ }
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandleImpl( void ) PRIVILEGED_FUNCTION;
+
+ TaskHandle_t MPU_xTaskGetIdleTaskHandleImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xIdleTaskHandle = NULL;
+
+ xIdleTaskHandle = xTaskGetIdleTaskHandle();
+
+ return xIdleTaskHandle;
+ }
+
+ #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskSuspendImpl( TaskHandle_t pxTaskToSuspend ) PRIVILEGED_FUNCTION;
+
+ void MPU_vTaskSuspendImpl( TaskHandle_t pxTaskToSuspend ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( pxTaskToSuspend == NULL )
+ {
+ vTaskSuspend( pxTaskToSuspend );
+ }
+ else
+ {
+ /* After the scheduler starts, only privileged tasks are allowed
+ * to suspend other tasks. */
+ if( ( xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED ) || ( portIS_TASK_PRIVILEGED() == pdTRUE ) )
+ {
+ lIndex = ( int32_t ) pxTaskToSuspend;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskSuspend( xInternalTaskHandle );
+ }
+ }
+ }
+ }
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+
+ void MPU_vTaskResumeImpl( TaskHandle_t pxTaskToResume ) PRIVILEGED_FUNCTION;
+
+ void MPU_vTaskResumeImpl( TaskHandle_t pxTaskToResume ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ lIndex = ( int32_t ) pxTaskToResume;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskResume( xInternalTaskHandle );
+ }
+ }
+ }
+
+ #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+ TickType_t MPU_xTaskGetTickCountImpl( void ) PRIVILEGED_FUNCTION;
+
+ TickType_t MPU_xTaskGetTickCountImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ TickType_t xReturn;
+
+ xReturn = xTaskGetTickCount();
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasksImpl( void ) PRIVILEGED_FUNCTION;
+
+ UBaseType_t MPU_uxTaskGetNumberOfTasksImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ UBaseType_t uxReturn;
+
+ uxReturn = uxTaskGetNumberOfTasks();
+
+ return uxReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ char * MPU_pcTaskGetNameImpl( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION;
+
+ char * MPU_pcTaskGetNameImpl( TaskHandle_t xTaskToQuery ) /* PRIVILEGED_FUNCTION */
+ {
+ char * pcReturn = NULL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( xTaskToQuery == NULL )
+ {
+ pcReturn = pcTaskGetName( xTaskToQuery );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTaskToQuery;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ pcReturn = pcTaskGetName( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return pcReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounterImpl( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounterImpl( const TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ configRUN_TIME_COUNTER_TYPE xReturn = 0;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( xTask == NULL )
+ {
+ xReturn = ulTaskGetRunTimeCounter( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = ulTaskGetRunTimeCounter( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercentImpl( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercentImpl( const TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ configRUN_TIME_COUNTER_TYPE xReturn = 0;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( xTask == NULL )
+ {
+ xReturn = ulTaskGetRunTimePercent( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = ulTaskGetRunTimePercent( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercentImpl( void ) PRIVILEGED_FUNCTION;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercentImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ configRUN_TIME_COUNTER_TYPE xReturn;
+
+ xReturn = ulTaskGetIdleRunTimePercent();
+
+ return xReturn;
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounterImpl( void ) PRIVILEGED_FUNCTION;
+
+ configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounterImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ configRUN_TIME_COUNTER_TYPE xReturn;
+
+ xReturn = ulTaskGetIdleRunTimeCounter();
+
+ return xReturn;
+ }
+
+ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ void MPU_vTaskSetApplicationTaskTagImpl( TaskHandle_t xTask,
+ TaskHookFunction_t pxTagValue ) PRIVILEGED_FUNCTION;
+
+ void MPU_vTaskSetApplicationTaskTagImpl( TaskHandle_t xTask,
+ TaskHookFunction_t pxTagValue ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xInternalTaskHandle = NULL;
+ int32_t lIndex;
+
+ if( xTask == NULL )
+ {
+ vTaskSetApplicationTaskTag( xTask, pxTagValue );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskSetApplicationTaskTag( xInternalTaskHandle, pxTagValue );
+ }
+ }
+ }
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTagImpl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+ TaskHookFunction_t MPU_xTaskGetApplicationTaskTagImpl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHookFunction_t xReturn = NULL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( xTask == NULL )
+ {
+ xReturn = xTaskGetApplicationTaskTag( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ xReturn = xTaskGetApplicationTaskTag( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void MPU_vTaskSetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) PRIVILEGED_FUNCTION;
+
+ void MPU_vTaskSetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( xTaskToSet == NULL )
+ {
+ vTaskSetThreadLocalStoragePointer( xTaskToSet, xIndex, pvValue );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTaskToSet;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ vTaskSetThreadLocalStoragePointer( xInternalTaskHandle, xIndex, pvValue );
+ }
+ }
+ }
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+ void * MPU_pvTaskGetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) PRIVILEGED_FUNCTION;
+
+ void * MPU_pvTaskGetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* PRIVILEGED_FUNCTION */
+ {
+ void * pvReturn = NULL;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( xTaskToQuery == NULL )
+ {
+ pvReturn = pvTaskGetThreadLocalStoragePointer( xTaskToQuery, xIndex );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTaskToQuery;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ pvReturn = pvTaskGetThreadLocalStoragePointer( xInternalTaskHandle, xIndex );
+ }
+ }
+ }
+
+ return pvReturn;
+ }
+
+ #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TRACE_FACILITY == 1 )
+
+ UBaseType_t MPU_uxTaskGetSystemStateImpl( TaskStatus_t * pxTaskStatusArray,
+ UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * pulTotalRunTime ) PRIVILEGED_FUNCTION;
+
+ UBaseType_t MPU_uxTaskGetSystemStateImpl( TaskStatus_t * pxTaskStatusArray,
+ UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * pulTotalRunTime ) /* PRIVILEGED_FUNCTION */
+ {
+ UBaseType_t uxReturn = pdFALSE;
+ UBaseType_t xIsTaskStatusArrayWriteable = pdFALSE;
+ UBaseType_t xIsTotalRunTimeWriteable = pdFALSE;
+
+ xIsTaskStatusArrayWriteable = xPortIsAuthorizedToAccessBuffer( pxTaskStatusArray,
+ sizeof( TaskStatus_t ) * uxArraySize,
+ tskMPU_WRITE_PERMISSION );
+
+ if( pulTotalRunTime != NULL )
+ {
+ xIsTotalRunTimeWriteable = xPortIsAuthorizedToAccessBuffer( pulTotalRunTime,
+ sizeof( configRUN_TIME_COUNTER_TYPE ),
+ tskMPU_WRITE_PERMISSION );
+ }
+
+ if( ( xIsTaskStatusArrayWriteable == pdTRUE ) &&
+ ( ( pulTotalRunTime == NULL ) || ( xIsTotalRunTimeWriteable == pdTRUE ) ) )
+ {
+ uxReturn = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, pulTotalRunTime );
+ }
+
+ return uxReturn;
+ }
+
+ #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMarkImpl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+ UBaseType_t MPU_uxTaskGetStackHighWaterMarkImpl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ UBaseType_t uxReturn = 0;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( xTask == NULL )
+ {
+ uxReturn = uxTaskGetStackHighWaterMark( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ uxReturn = uxTaskGetStackHighWaterMark( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return uxReturn;
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2Impl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+ configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2Impl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+ {
+ configSTACK_DEPTH_TYPE uxReturn = 0;
+ int32_t lIndex;
+ TaskHandle_t xInternalTaskHandle = NULL;
+
+ if( xTask == NULL )
+ {
+ uxReturn = uxTaskGetStackHighWaterMark2( xTask );
+ }
+ else
+ {
+ lIndex = ( int32_t ) xTask;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTaskHandle != NULL )
+ {
+ uxReturn = uxTaskGetStackHighWaterMark2( xInternalTaskHandle );
+ }
+ }
+ }
+
+ return uxReturn;
+ }
+
+ #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+        TaskHandle_t MPU_xTaskGetCurrentTaskHandleImpl( void ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xTaskGetCurrentTaskHandle(). Performs the reverse
+         * translation: internal handle -> pool index -> opaque external
+         * handle. Returns NULL if the current task is not tracked in the
+         * kernel object pool. */
+        TaskHandle_t MPU_xTaskGetCurrentTaskHandleImpl( void ) /* PRIVILEGED_FUNCTION */
+        {
+            TaskHandle_t xInternalTaskHandle = NULL;
+            TaskHandle_t xExternalTaskHandle = NULL;
+            int32_t lIndex;
+
+            xInternalTaskHandle = xTaskGetCurrentTaskHandle();
+
+            if( xInternalTaskHandle != NULL )
+            {
+                lIndex = MPU_GetIndexForTaskHandle( xInternalTaskHandle );
+
+                /* -1 means the handle has no entry in the pool. */
+                if( lIndex != -1 )
+                {
+                    xExternalTaskHandle = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+            }
+
+            return xExternalTaskHandle;
+        }
+
+    #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+        BaseType_t MPU_xTaskGetSchedulerStateImpl( void ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xTaskGetSchedulerState(). No handles or pointers are
+         * involved, so this is a straight pass-through. */
+        BaseType_t MPU_xTaskGetSchedulerStateImpl( void ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = taskSCHEDULER_NOT_STARTED;
+
+            xReturn = xTaskGetSchedulerState();
+
+            return xReturn;
+        }
+
+    #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+    void MPU_vTaskSetTimeOutStateImpl( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION;
+
+    /* MPU wrapper for vTaskSetTimeOutState(). The kernel writes through
+     * pxTimeOut, so the call is only forwarded after verifying the calling
+     * task has write access to the whole TimeOut_t. Silently does nothing on
+     * an unauthorized buffer.
+     * NOTE(review): no explicit NULL check — assumes
+     * xPortIsAuthorizedToAccessBuffer() rejects NULL; confirm in the port. */
+    void MPU_vTaskSetTimeOutStateImpl( TimeOut_t * const pxTimeOut ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xIsTimeOutWriteable = pdFALSE;
+
+        xIsTimeOutWriteable = xPortIsAuthorizedToAccessBuffer( pxTimeOut,
+                                                               sizeof( TimeOut_t ),
+                                                               tskMPU_WRITE_PERMISSION );
+
+        if( xIsTimeOutWriteable == pdTRUE )
+        {
+            vTaskSetTimeOutState( pxTimeOut );
+        }
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xTaskCheckForTimeOutImpl( TimeOut_t * const pxTimeOut,
+                                             TickType_t * const pxTicksToWait ) PRIVILEGED_FUNCTION;
+
+    /* MPU wrapper for xTaskCheckForTimeOut(). Both out-parameters are written
+     * by the kernel, so both buffers must be writable by the caller before
+     * the call is forwarded. Returns pdFALSE if either check fails. */
+    BaseType_t MPU_xTaskCheckForTimeOutImpl( TimeOut_t * const pxTimeOut,
+                                             TickType_t * const pxTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFALSE;
+        BaseType_t xIsTimeOutWriteable = pdFALSE;
+        BaseType_t xIsTicksToWaitWriteable = pdFALSE;
+
+        xIsTimeOutWriteable = xPortIsAuthorizedToAccessBuffer( pxTimeOut,
+                                                               sizeof( TimeOut_t ),
+                                                               tskMPU_WRITE_PERMISSION );
+        xIsTicksToWaitWriteable = xPortIsAuthorizedToAccessBuffer( pxTicksToWait,
+                                                                   sizeof( TickType_t ),
+                                                                   tskMPU_WRITE_PERMISSION );
+
+        if( ( xIsTimeOutWriteable == pdTRUE ) && ( xIsTicksToWaitWriteable == pdTRUE ) )
+        {
+            xReturn = xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait );
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+        BaseType_t MPU_xTaskGenericNotifyImpl( TaskHandle_t xTaskToNotify,
+                                               UBaseType_t uxIndexToNotify,
+                                               uint32_t ulValue,
+                                               eNotifyAction eAction,
+                                               uint32_t * pulPreviousNotificationValue ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xTaskGenericNotify(). pulPreviousNotificationValue
+         * is optional; when supplied it must be writable by the caller. The
+         * target task's external handle is validated and translated before
+         * the notification is delivered. Returns pdFAIL on any check
+         * failure. */
+        BaseType_t MPU_xTaskGenericNotifyImpl( TaskHandle_t xTaskToNotify,
+                                               UBaseType_t uxIndexToNotify,
+                                               uint32_t ulValue,
+                                               eNotifyAction eAction,
+                                               uint32_t * pulPreviousNotificationValue ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            int32_t lIndex;
+            TaskHandle_t xInternalTaskHandle = NULL;
+            BaseType_t xIsPreviousNotificationValueWriteable = pdFALSE;
+
+            if( pulPreviousNotificationValue != NULL )
+            {
+                xIsPreviousNotificationValueWriteable = xPortIsAuthorizedToAccessBuffer( pulPreviousNotificationValue,
+                                                                                         sizeof( uint32_t ),
+                                                                                         tskMPU_WRITE_PERMISSION );
+            }
+
+            if( ( pulPreviousNotificationValue == NULL ) || ( xIsPreviousNotificationValueWriteable == pdTRUE ) )
+            {
+                lIndex = ( int32_t ) xTaskToNotify;
+
+                if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                {
+                    xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTaskHandle != NULL )
+                    {
+                        xReturn = xTaskGenericNotify( xInternalTaskHandle, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+        BaseType_t MPU_xTaskGenericNotifyWaitImpl( UBaseType_t uxIndexToWaitOn,
+                                                   uint32_t ulBitsToClearOnEntry,
+                                                   uint32_t ulBitsToClearOnExit,
+                                                   uint32_t * pulNotificationValue,
+                                                   TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xTaskGenericNotifyWait(). Operates on the calling
+         * task, so no handle translation is needed; only the optional
+         * out-parameter is permission-checked. */
+        BaseType_t MPU_xTaskGenericNotifyWaitImpl( UBaseType_t uxIndexToWaitOn,
+                                                   uint32_t ulBitsToClearOnEntry,
+                                                   uint32_t ulBitsToClearOnExit,
+                                                   uint32_t * pulNotificationValue,
+                                                   TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            BaseType_t xIsNotificationValueWritable = pdFALSE;
+
+            if( pulNotificationValue != NULL )
+            {
+                xIsNotificationValueWritable = xPortIsAuthorizedToAccessBuffer( pulNotificationValue,
+                                                                                sizeof( uint32_t ),
+                                                                                tskMPU_WRITE_PERMISSION );
+            }
+
+            if( ( pulNotificationValue == NULL ) || ( xIsNotificationValueWritable == pdTRUE ) )
+            {
+                xReturn = xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait );
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+        uint32_t MPU_ulTaskGenericNotifyTakeImpl( UBaseType_t uxIndexToWaitOn,
+                                                  BaseType_t xClearCountOnExit,
+                                                  TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for ulTaskGenericNotifyTake(). Operates on the calling
+         * task with no pointer parameters, so it is a straight pass-through. */
+        uint32_t MPU_ulTaskGenericNotifyTakeImpl( UBaseType_t uxIndexToWaitOn,
+                                                  BaseType_t xClearCountOnExit,
+                                                  TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+        {
+            uint32_t ulReturn;
+
+            ulReturn = ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait );
+
+            return ulReturn;
+        }
+
+    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+        BaseType_t MPU_xTaskGenericNotifyStateClearImpl( TaskHandle_t xTask,
+                                                         UBaseType_t uxIndexToClear ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xTaskGenericNotifyStateClear(). NULL targets the
+         * calling task; otherwise the external handle is validated and
+         * translated. Returns pdFAIL on an invalid handle. */
+        BaseType_t MPU_xTaskGenericNotifyStateClearImpl( TaskHandle_t xTask,
+                                                         UBaseType_t uxIndexToClear ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            int32_t lIndex;
+            TaskHandle_t xInternalTaskHandle = NULL;
+
+            if( xTask == NULL )
+            {
+                xReturn = xTaskGenericNotifyStateClear( xTask, uxIndexToClear );
+            }
+            else
+            {
+                lIndex = ( int32_t ) xTask;
+
+                if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                {
+                    xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTaskHandle != NULL )
+                    {
+                        xReturn = xTaskGenericNotifyStateClear( xInternalTaskHandle, uxIndexToClear );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+        uint32_t MPU_ulTaskGenericNotifyValueClearImpl( TaskHandle_t xTask,
+                                                        UBaseType_t uxIndexToClear,
+                                                        uint32_t ulBitsToClear ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for ulTaskGenericNotifyValueClear(). Same NULL /
+         * external-handle contract as the state-clear wrapper above. Returns
+         * 0 on an invalid handle. */
+        uint32_t MPU_ulTaskGenericNotifyValueClearImpl( TaskHandle_t xTask,
+                                                        UBaseType_t uxIndexToClear,
+                                                        uint32_t ulBitsToClear ) /* PRIVILEGED_FUNCTION */
+        {
+            uint32_t ulReturn = 0;
+            int32_t lIndex;
+            TaskHandle_t xInternalTaskHandle = NULL;
+
+            if( xTask == NULL )
+            {
+                ulReturn = ulTaskGenericNotifyValueClear( xTask, uxIndexToClear, ulBitsToClear );
+            }
+            else
+            {
+                lIndex = ( int32_t ) xTask;
+
+                if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                {
+                    xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTaskHandle != NULL )
+                    {
+                        ulReturn = ulTaskGenericNotifyValueClear( xInternalTaskHandle, uxIndexToClear, ulBitsToClear );
+                    }
+                }
+            }
+
+            return ulReturn;
+        }
+
+    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Privileged only wrappers for Task APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xTaskCreate(). Reserves a slot in the
+         * kernel object pool, creates the task, stores the internal handle in
+         * the slot and hands the opaque external index back to the caller via
+         * pxCreatedTask (if non-NULL). Returns pdFAIL if no slot is free, the
+         * requested priority lacks portPRIVILEGE_BIT, or creation fails. */
+        BaseType_t MPU_xTaskCreate( TaskFunction_t pvTaskCode,
+                                    const char * const pcName,
+                                    uint16_t usStackDepth,
+                                    void * pvParameters,
+                                    UBaseType_t uxPriority,
+                                    TaskHandle_t * pxCreatedTask ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            int32_t lIndex;
+            TaskHandle_t xInternalTaskHandle = NULL;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                /* xTaskCreate() can only be used to create privileged tasks in MPU port. */
+                if( ( uxPriority & portPRIVILEGE_BIT ) != 0 )
+                {
+                    xReturn = xTaskCreate( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, &( xInternalTaskHandle ) );
+
+                    if( ( xReturn == pdPASS ) && ( xInternalTaskHandle != NULL ) )
+                    {
+                        MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle );
+
+                        if( pxCreatedTask != NULL )
+                        {
+                            *pxCreatedTask = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                        }
+                    }
+                    else
+                    {
+                        MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                    }
+                }
+                else
+                {
+                    /* Fix: the slot reserved by MPU_GetFreeIndexInKernelObjectPool()
+                     * must also be released when the request is rejected because
+                     * portPRIVILEGE_BIT is not set - previously it leaked on this
+                     * path (the creation-failure path already frees it, which shows
+                     * the slot is reserved at acquisition). */
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xTaskCreateStatic(). Reserves a kernel
+         * object pool slot, creates the task, and returns the opaque external
+         * handle, or NULL if no slot is free or creation fails (in which case
+         * the slot is released). */
+        TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode,
+                                            const char * const pcName,
+                                            const uint32_t ulStackDepth,
+                                            void * const pvParameters,
+                                            UBaseType_t uxPriority,
+                                            StackType_t * const puxStackBuffer,
+                                            StaticTask_t * const pxTaskBuffer ) /* PRIVILEGED_FUNCTION */
+        {
+            TaskHandle_t xExternalTaskHandle = NULL;
+            TaskHandle_t xInternalTaskHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalTaskHandle = xTaskCreateStatic( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer );
+
+                if( xInternalTaskHandle != NULL )
+                {
+                    MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle );
+                    xExternalTaskHandle = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    /* Creation failed - return the reserved slot to the pool. */
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalTaskHandle;
+        }
+
+    #endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+    #if ( INCLUDE_vTaskDelete == 1 )
+
+        /* Privileged-only wrapper for vTaskDelete(). NULL deletes the calling
+         * task. The task's kernel object pool slot is released as part of the
+         * deletion. */
+        void MPU_vTaskDelete( TaskHandle_t pxTaskToDelete ) /* PRIVILEGED_FUNCTION */
+        {
+            TaskHandle_t xInternalTaskHandle = NULL;
+            int32_t lIndex;
+
+            if( pxTaskToDelete == NULL )
+            {
+                xInternalTaskHandle = xTaskGetCurrentTaskHandle();
+                lIndex = MPU_GetIndexForTaskHandle( xInternalTaskHandle );
+
+                /* Fix: free the pool slot BEFORE the delete. vTaskDelete() does
+                 * not return when a task deletes itself, so any cleanup placed
+                 * after the call on this path is unreachable and the slot would
+                 * leak. */
+                if( lIndex != -1 )
+                {
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+
+                vTaskDelete( xInternalTaskHandle );
+            }
+            else
+            {
+                lIndex = ( int32_t ) pxTaskToDelete;
+
+                if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                {
+                    xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTaskHandle != NULL )
+                    {
+                        /* Free first for the same reason: the caller may be
+                         * deleting itself through its own external handle, in
+                         * which case vTaskDelete() does not return. */
+                        MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+                        vTaskDelete( xInternalTaskHandle );
+                    }
+                }
+            }
+        }
+
+    #endif /* #if ( INCLUDE_vTaskDelete == 1 ) */
+/*-----------------------------------------------------------*/
+
+
+    #if ( INCLUDE_vTaskPrioritySet == 1 )
+
+        /* Privileged-only wrapper for vTaskPrioritySet(). NULL targets the
+         * calling task; otherwise the opaque external handle is validated and
+         * translated. Silently does nothing on an invalid handle. */
+        void MPU_vTaskPrioritySet( TaskHandle_t pxTask,
+                                   UBaseType_t uxNewPriority ) /* PRIVILEGED_FUNCTION */
+        {
+            TaskHandle_t xInternalTaskHandle = NULL;
+            int32_t lIndex;
+
+            if( pxTask == NULL )
+            {
+                vTaskPrioritySet( pxTask, uxNewPriority );
+            }
+            else
+            {
+                lIndex = ( int32_t ) pxTask;
+
+                if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                {
+                    xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTaskHandle != NULL )
+                    {
+                        vTaskPrioritySet( xInternalTaskHandle, uxNewPriority );
+                    }
+                }
+            }
+        }
+
+    #endif /* if ( INCLUDE_vTaskPrioritySet == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( INCLUDE_xTaskGetHandle == 1 )
+
+        /* Privileged-only wrapper for xTaskGetHandle(). Looks the task up by
+         * name and converts the internal handle to its opaque external index.
+         * Returns NULL if no task matches or the task is not in the pool. */
+        TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) /* PRIVILEGED_FUNCTION */
+        {
+            TaskHandle_t xInternalTaskHandle = NULL;
+            TaskHandle_t xExternalTaskHandle = NULL;
+            int32_t lIndex;
+
+            xInternalTaskHandle = xTaskGetHandle( pcNameToQuery );
+
+            if( xInternalTaskHandle != NULL )
+            {
+                lIndex = MPU_GetIndexForTaskHandle( xInternalTaskHandle );
+
+                if( lIndex != -1 )
+                {
+                    xExternalTaskHandle = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+            }
+
+            return xExternalTaskHandle;
+        }
+
+    #endif /* if ( INCLUDE_xTaskGetHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+
+    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+        /* Privileged-only wrapper for xTaskCallApplicationTaskHook(). NULL
+         * targets the calling task; otherwise the external handle is
+         * validated and translated. Returns pdFAIL on an invalid handle. */
+        BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask,
+                                                     void * pvParameter ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            int32_t lIndex;
+            TaskHandle_t xInternalTaskHandle = NULL;
+
+            if( xTask == NULL )
+            {
+                xReturn = xTaskCallApplicationTaskHook( xTask, pvParameter );
+            }
+            else
+            {
+                lIndex = ( int32_t ) xTask;
+
+                if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                {
+                    xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTaskHandle != NULL )
+                    {
+                        xReturn = xTaskCallApplicationTaskHook( xInternalTaskHandle, pvParameter );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xTaskCreateRestricted(). Reserves a
+         * kernel object pool slot, creates the task, stores the internal
+         * handle and returns the opaque external index via pxCreatedTask (if
+         * non-NULL). The slot is released if creation fails. */
+        BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
+                                              TaskHandle_t * pxCreatedTask ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            int32_t lIndex;
+            TaskHandle_t xInternalTaskHandle = NULL;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xReturn = xTaskCreateRestricted( pxTaskDefinition, &( xInternalTaskHandle ) );
+
+                if( ( xReturn == pdPASS ) && ( xInternalTaskHandle != NULL ) )
+                {
+                    MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle );
+
+                    if( pxCreatedTask != NULL )
+                    {
+                        *pxCreatedTask = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                    }
+                }
+                else
+                {
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xTaskCreateRestrictedStatic(). Same
+         * slot-reservation contract as the dynamic variant above. */
+        BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition,
+                                                    TaskHandle_t * pxCreatedTask ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            int32_t lIndex;
+            TaskHandle_t xInternalTaskHandle = NULL;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xReturn = xTaskCreateRestrictedStatic( pxTaskDefinition, &( xInternalTaskHandle ) );
+
+                if( ( xReturn == pdPASS ) && ( xInternalTaskHandle != NULL ) )
+                {
+                    MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle );
+
+                    if( pxCreatedTask != NULL )
+                    {
+                        *pxCreatedTask = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                    }
+                }
+                else
+                {
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+    /* Privileged-only wrapper for vTaskAllocateMPURegions(). NULL targets the
+     * calling task; otherwise the opaque external handle is validated and
+     * translated. Silently does nothing on an invalid handle. */
+    void MPU_vTaskAllocateMPURegions( TaskHandle_t xTaskToModify,
+                                      const MemoryRegion_t * const xRegions ) /* PRIVILEGED_FUNCTION */
+    {
+        TaskHandle_t xInternalTaskHandle = NULL;
+        int32_t lIndex;
+
+        if( xTaskToModify == NULL )
+        {
+            vTaskAllocateMPURegions( xTaskToModify, xRegions );
+        }
+        else
+        {
+            lIndex = ( int32_t ) xTaskToModify;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalTaskHandle != NULL )
+                {
+                    vTaskAllocateMPURegions( xInternalTaskHandle, xRegions );
+                }
+            }
+        }
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xTaskGetStaticBuffers(). A NULL handle
+         * is resolved to the calling task's internal handle before the query.
+         * Returns pdFALSE on an invalid handle. */
+        BaseType_t MPU_xTaskGetStaticBuffers( TaskHandle_t xTask,
+                                              StackType_t ** ppuxStackBuffer,
+                                              StaticTask_t ** ppxTaskBuffer ) /* PRIVILEGED_FUNCTION */
+        {
+            TaskHandle_t xInternalTaskHandle = NULL;
+            int32_t lIndex;
+            BaseType_t xReturn = pdFALSE;
+
+            if( xTask == NULL )
+            {
+                xInternalTaskHandle = xTaskGetCurrentTaskHandle();
+                xReturn = xTaskGetStaticBuffers( xInternalTaskHandle, ppuxStackBuffer, ppxTaskBuffer );
+            }
+            else
+            {
+                lIndex = ( int32_t ) xTask;
+
+                if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                {
+                    xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTaskHandle != NULL )
+                    {
+                        xReturn = xTaskGetStaticBuffers( xInternalTaskHandle, ppuxStackBuffer, ppxTaskBuffer );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+        /* ISR-safe wrapper for uxTaskPriorityGetFromISR(). NULL queries the
+         * currently running task; otherwise the external handle is validated
+         * and translated. Returns configMAX_PRIORITIES (an out-of-range
+         * priority) on an invalid handle. */
+        UBaseType_t MPU_uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+        {
+            UBaseType_t uxReturn = configMAX_PRIORITIES;
+            int32_t lIndex;
+            TaskHandle_t xInternalTaskHandle = NULL;
+
+            if( xTask == NULL )
+            {
+                uxReturn = uxTaskPriorityGetFromISR( xTask );
+            }
+            else
+            {
+                lIndex = ( int32_t ) xTask;
+
+                if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                {
+                    xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTaskHandle != NULL )
+                    {
+                        uxReturn = uxTaskPriorityGetFromISR( xInternalTaskHandle );
+                    }
+                }
+            }
+
+            return uxReturn;
+        }
+
+    #endif /* #if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
+
+        /* ISR-safe wrapper for xTaskResumeFromISR(). The external handle must
+         * be valid; there is no NULL shortcut because a task cannot resume
+         * itself from an ISR. Returns pdFAIL on an invalid handle. */
+        BaseType_t MPU_xTaskResumeFromISR( TaskHandle_t xTaskToResume ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            int32_t lIndex;
+            TaskHandle_t xInternalTaskHandle = NULL;
+
+            lIndex = ( int32_t ) xTaskToResume;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalTaskHandle != NULL )
+                {
+                    xReturn = xTaskResumeFromISR( xInternalTaskHandle );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )*/
+/*---------------------------------------------------------------------------------------*/
+
+    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+        /* ISR-safe wrapper for xTaskGetApplicationTaskTagFromISR(). NULL
+         * queries the currently running task; otherwise the external handle
+         * is validated and translated. Returns NULL on an invalid handle. */
+        TaskHookFunction_t MPU_xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */
+        {
+            TaskHookFunction_t xReturn = NULL;
+            int32_t lIndex;
+            TaskHandle_t xInternalTaskHandle = NULL;
+
+            if( xTask == NULL )
+            {
+                xReturn = xTaskGetApplicationTaskTagFromISR( xTask );
+            }
+            else
+            {
+                lIndex = ( int32_t ) xTask;
+
+                if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                {
+                    xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                    if( xInternalTaskHandle != NULL )
+                    {
+                        xReturn = xTaskGetApplicationTaskTagFromISR( xInternalTaskHandle );
+                    }
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* #if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*---------------------------------------------------------------------------------------*/
+
+    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+        /* ISR-safe wrapper for xTaskGenericNotifyFromISR(). Translates the
+         * target task's external handle; the pointer parameters are passed
+         * through unchecked because the caller is an ISR running privileged.
+         * Returns pdFAIL on an invalid handle. */
+        BaseType_t MPU_xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
+                                                  UBaseType_t uxIndexToNotify,
+                                                  uint32_t ulValue,
+                                                  eNotifyAction eAction,
+                                                  uint32_t * pulPreviousNotificationValue,
+                                                  BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            int32_t lIndex;
+            TaskHandle_t xInternalTaskHandle = NULL;
+
+            lIndex = ( int32_t ) xTaskToNotify;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalTaskHandle != NULL )
+                {
+                    xReturn = xTaskGenericNotifyFromISR( xInternalTaskHandle, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* #if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*---------------------------------------------------------------------------------------*/
+
+    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+        /* ISR-safe wrapper for vTaskGenericNotifyGiveFromISR(). Same
+         * handle-translation contract as the notify wrapper above; silently
+         * does nothing on an invalid handle. */
+        void MPU_vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
+                                                UBaseType_t uxIndexToNotify,
+                                                BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+        {
+            int32_t lIndex;
+            TaskHandle_t xInternalTaskHandle = NULL;
+
+            lIndex = ( int32_t ) xTaskToNotify;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalTaskHandle != NULL )
+                {
+                    vTaskGenericNotifyGiveFromISR( xInternalTaskHandle, uxIndexToNotify, pxHigherPriorityTaskWoken );
+                }
+            }
+        }
+    #endif /*#if ( configUSE_TASK_NOTIFICATIONS == 1 )*/
+/*-----------------------------------------------------------*/
+
+/*-----------------------------------------------------------*/
+/* MPU wrappers for queue APIs. */
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xQueueGenericSendImpl( QueueHandle_t xQueue,
+                                          const void * const pvItemToQueue,
+                                          TickType_t xTicksToWait,
+                                          BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
+
+    /* MPU wrapper for xQueueGenericSend(). The external queue handle is
+     * validated and translated; if an item buffer is supplied the caller
+     * must have read access to one full queue item (NULL is allowed - it is
+     * the zero-item-size / semaphore case). Returns pdFAIL on any check
+     * failure. */
+    BaseType_t MPU_xQueueGenericSendImpl( QueueHandle_t xQueue,
+                                          const void * const pvItemToQueue,
+                                          TickType_t xTicksToWait,
+                                          BaseType_t xCopyPosition ) /* PRIVILEGED_FUNCTION */
+    {
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+        BaseType_t xReturn = pdFAIL;
+        BaseType_t xIsItemToQueueReadable = pdFALSE;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                if( pvItemToQueue != NULL )
+                {
+                    /* The required read size is the queue's own item size. */
+                    xIsItemToQueueReadable = xPortIsAuthorizedToAccessBuffer( pvItemToQueue,
+                                                                             uxQueueGetQueueItemSize( xInternalQueueHandle ),
+                                                                             tskMPU_READ_PERMISSION );
+                }
+
+                if( ( pvItemToQueue == NULL ) || ( xIsItemToQueueReadable == pdTRUE ) )
+                {
+                    xReturn = xQueueGenericSend( xInternalQueueHandle, pvItemToQueue, xTicksToWait, xCopyPosition );
+                }
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    UBaseType_t MPU_uxQueueMessagesWaitingImpl( const QueueHandle_t pxQueue ) PRIVILEGED_FUNCTION;
+
+    /* MPU wrapper for uxQueueMessagesWaiting(). Returns 0 on an invalid
+     * external handle. */
+    UBaseType_t MPU_uxQueueMessagesWaitingImpl( const QueueHandle_t pxQueue ) /* PRIVILEGED_FUNCTION */
+    {
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+        UBaseType_t uxReturn = 0;
+
+        lIndex = ( int32_t ) pxQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                uxReturn = uxQueueMessagesWaiting( xInternalQueueHandle );
+            }
+        }
+
+        return uxReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    UBaseType_t MPU_uxQueueSpacesAvailableImpl( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+    /* MPU wrapper for uxQueueSpacesAvailable(). Returns 0 on an invalid
+     * external handle. */
+    UBaseType_t MPU_uxQueueSpacesAvailableImpl( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+    {
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+        UBaseType_t uxReturn = 0;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                uxReturn = uxQueueSpacesAvailable( xInternalQueueHandle );
+            }
+        }
+
+        return uxReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xQueueReceiveImpl( QueueHandle_t pxQueue,
+                                      void * const pvBuffer,
+                                      TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+    /* MPU wrapper for xQueueReceive(). The external queue handle is validated
+     * and translated, and the caller must have write access to one full
+     * queue item at pvBuffer. Returns pdFAIL on any check failure.
+     * NOTE(review): unlike the send wrapper there is no NULL shortcut for
+     * pvBuffer - assumes xPortIsAuthorizedToAccessBuffer() rejects NULL;
+     * confirm in the port. */
+    BaseType_t MPU_xQueueReceiveImpl( QueueHandle_t pxQueue,
+                                      void * const pvBuffer,
+                                      TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+        BaseType_t xReturn = pdFAIL;
+        BaseType_t xIsReceiveBufferWritable = pdFALSE;
+
+        lIndex = ( int32_t ) pxQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                xIsReceiveBufferWritable = xPortIsAuthorizedToAccessBuffer( pvBuffer,
+                                                                           uxQueueGetQueueItemSize( xInternalQueueHandle ),
+                                                                           tskMPU_WRITE_PERMISSION );
+
+                if( xIsReceiveBufferWritable == pdTRUE )
+                {
+                    xReturn = xQueueReceive( xInternalQueueHandle, pvBuffer, xTicksToWait );
+                }
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xQueuePeekImpl( QueueHandle_t xQueue,
+                                   void * const pvBuffer,
+                                   TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+    /* MPU wrapper for xQueuePeek(). Identical checks to the receive wrapper
+     * above, but the item is left on the queue. */
+    BaseType_t MPU_xQueuePeekImpl( QueueHandle_t xQueue,
+                                   void * const pvBuffer,
+                                   TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+        BaseType_t xReturn = pdFAIL;
+        BaseType_t xIsReceiveBufferWritable = pdFALSE;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                xIsReceiveBufferWritable = xPortIsAuthorizedToAccessBuffer( pvBuffer,
+                                                                           uxQueueGetQueueItemSize( xInternalQueueHandle ),
+                                                                           tskMPU_WRITE_PERMISSION );
+
+                if( xIsReceiveBufferWritable == pdTRUE )
+                {
+                    xReturn = xQueuePeek( xInternalQueueHandle, pvBuffer, xTicksToWait );
+                }
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xQueueSemaphoreTakeImpl( QueueHandle_t xQueue,
+                                            TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+    /* MPU wrapper for xQueueSemaphoreTake(). No user buffers are involved, so
+     * only the external handle is validated and translated. Returns pdFAIL on
+     * an invalid handle. */
+    BaseType_t MPU_xQueueSemaphoreTakeImpl( QueueHandle_t xQueue,
+                                            TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        int32_t lIndex;
+        QueueHandle_t xInternalQueueHandle = NULL;
+        BaseType_t xReturn = pdFAIL;
+
+        lIndex = ( int32_t ) xQueue;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalQueueHandle != NULL )
+            {
+                xReturn = xQueueSemaphoreTake( xInternalQueueHandle, xTicksToWait );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+        TaskHandle_t MPU_xQueueGetMutexHolderImpl( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xQueueGetMutexHolder(). Translates the semaphore's
+         * external handle in, and the holder task's internal handle back out
+         * to its opaque external index. Returns NULL if the semaphore handle
+         * is invalid, the mutex is not held, or the holder is not in the
+         * pool. */
+        TaskHandle_t MPU_xQueueGetMutexHolderImpl( QueueHandle_t xSemaphore ) /* PRIVILEGED_FUNCTION */
+        {
+            TaskHandle_t xMutexHolderTaskInternalHandle = NULL;
+            TaskHandle_t xMutexHolderTaskExternalHandle = NULL;
+            int32_t lIndex, lMutexHolderTaskIndex;
+            QueueHandle_t xInternalQueueHandle = NULL;
+
+
+            lIndex = ( int32_t ) xSemaphore;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    xMutexHolderTaskInternalHandle = xQueueGetMutexHolder( xInternalQueueHandle );
+
+                    if( xMutexHolderTaskInternalHandle != NULL )
+                    {
+                        lMutexHolderTaskIndex = MPU_GetIndexForTaskHandle( xMutexHolderTaskInternalHandle );
+
+                        if( lMutexHolderTaskIndex != -1 )
+                        {
+                            xMutexHolderTaskExternalHandle = ( TaskHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lMutexHolderTaskIndex ) );
+                        }
+                    }
+                }
+            }
+
+            return xMutexHolderTaskExternalHandle;
+        }
+
+    #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+        BaseType_t MPU_xQueueTakeMutexRecursiveImpl( QueueHandle_t xMutex,
+                                                     TickType_t xBlockTime ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xQueueTakeMutexRecursive(). Only handle validation
+         * and translation is required. Returns pdFAIL on an invalid handle. */
+        BaseType_t MPU_xQueueTakeMutexRecursiveImpl( QueueHandle_t xMutex,
+                                                     TickType_t xBlockTime ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            int32_t lIndex;
+            QueueHandle_t xInternalQueueHandle = NULL;
+
+            lIndex = ( int32_t ) xMutex;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    xReturn = xQueueTakeMutexRecursive( xInternalQueueHandle, xBlockTime );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+        BaseType_t MPU_xQueueGiveMutexRecursiveImpl( QueueHandle_t xMutex ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xQueueGiveMutexRecursive(). Same contract as the
+         * take wrapper above. */
+        BaseType_t MPU_xQueueGiveMutexRecursiveImpl( QueueHandle_t xMutex ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            int32_t lIndex;
+            QueueHandle_t xInternalQueueHandle = NULL;
+
+            lIndex = ( int32_t ) xMutex;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalQueueHandle != NULL )
+                {
+                    xReturn = xQueueGiveMutexRecursive( xInternalQueueHandle );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_QUEUE_SETS == 1 )
+
+        QueueSetMemberHandle_t MPU_xQueueSelectFromSetImpl( QueueSetHandle_t xQueueSet,
+                                                            TickType_t xBlockTimeTicks ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xQueueSelectFromSet(). Translates the set's external
+         * handle in, and the selected member's internal handle back out to
+         * its opaque external index. Returns NULL if the set handle is
+         * invalid, nothing is ready, or the member is not in the pool. */
+        QueueSetMemberHandle_t MPU_xQueueSelectFromSetImpl( QueueSetHandle_t xQueueSet,
+                                                            TickType_t xBlockTimeTicks ) /* PRIVILEGED_FUNCTION */
+        {
+            QueueSetHandle_t xInternalQueueSetHandle = NULL;
+            QueueSetMemberHandle_t xSelectedMemberInternal = NULL;
+            QueueSetMemberHandle_t xSelectedMemberExternal = NULL;
+            int32_t lIndexQueueSet, lIndexSelectedMember;
+
+            lIndexQueueSet = ( int32_t ) xQueueSet;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE )
+            {
+                xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) );
+
+                if( xInternalQueueSetHandle != NULL )
+                {
+                    xSelectedMemberInternal = xQueueSelectFromSet( xInternalQueueSetHandle, xBlockTimeTicks );
+
+                    if( xSelectedMemberInternal != NULL )
+                    {
+                        lIndexSelectedMember = MPU_GetIndexForQueueSetMemberHandle( xSelectedMemberInternal );
+
+                        if( lIndexSelectedMember != -1 )
+                        {
+                            xSelectedMemberExternal = ( QueueSetMemberHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lIndexSelectedMember ) );
+                        }
+                    }
+                }
+            }
+
+            return xSelectedMemberExternal;
+        }
+
+    #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_QUEUE_SETS == 1 )
+
+        BaseType_t MPU_xQueueAddToSetImpl( QueueSetMemberHandle_t xQueueOrSemaphore,
+                                           QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
+
+        /* MPU wrapper for xQueueAddToSet(). Both the set and the member are
+         * opaque external handles; both must validate and resolve before the
+         * call is forwarded. Returns pdFAIL otherwise. */
+        BaseType_t MPU_xQueueAddToSetImpl( QueueSetMemberHandle_t xQueueOrSemaphore,
+                                           QueueSetHandle_t xQueueSet ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFAIL;
+            QueueSetMemberHandle_t xInternalQueueSetMemberHandle = NULL;
+            QueueSetHandle_t xInternalQueueSetHandle;
+            int32_t lIndexQueueSet, lIndexQueueSetMember;
+
+            lIndexQueueSet = ( int32_t ) xQueueSet;
+            lIndexQueueSetMember = ( int32_t ) xQueueOrSemaphore;
+
+            if( ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE ) &&
+                ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSetMember ) != pdFALSE ) )
+            {
+                xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) );
+                xInternalQueueSetMemberHandle = MPU_GetQueueSetMemberHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSetMember ) );
+
+                if( ( xInternalQueueSetHandle != NULL ) && ( xInternalQueueSetMemberHandle != NULL ) )
+                {
+                    xReturn = xQueueAddToSet( xInternalQueueSetMemberHandle, xInternalQueueSetHandle );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if configQUEUE_REGISTRY_SIZE > 0
+
+ void MPU_vQueueAddToRegistryImpl( QueueHandle_t xQueue,
+ const char * pcName ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of vQueueAddToRegistry. Translates the opaque
+  * external queue handle to the internal one before registering the name.
+  * Silently does nothing if the handle does not validate. */
+ void MPU_vQueueAddToRegistryImpl( QueueHandle_t xQueue,
+ const char * pcName ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ QueueHandle_t xInternalQueueHandle = NULL;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ /* NOTE(review): pcName is passed through without an MPU access check -
+  * presumably vQueueAddToRegistry only stores the pointer; confirm the
+  * calling task is entitled to the string's memory. */
+ vQueueAddToRegistry( xInternalQueueHandle, pcName );
+ }
+ }
+ }
+
+ #endif /* if configQUEUE_REGISTRY_SIZE > 0 */
+/*-----------------------------------------------------------*/
+
+ #if configQUEUE_REGISTRY_SIZE > 0
+
+ void MPU_vQueueUnregisterQueueImpl( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of vQueueUnregisterQueue. Validates and
+  * translates the opaque handle; no-op when validation fails. */
+ void MPU_vQueueUnregisterQueueImpl( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ QueueHandle_t xInternalQueueHandle = NULL;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ vQueueUnregisterQueue( xInternalQueueHandle );
+ }
+ }
+ }
+
+ #endif /* if configQUEUE_REGISTRY_SIZE > 0 */
+/*-----------------------------------------------------------*/
+
+ #if configQUEUE_REGISTRY_SIZE > 0
+
+ const char * MPU_pcQueueGetNameImpl( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of pcQueueGetName. Translates the opaque
+  * external queue handle to the internal one and returns the registered
+  * name, or NULL when the handle does not validate or the slot is empty.
+  *
+  * Fix: pcReturn was previously uninitialized, so an invalid handle caused
+  * an indeterminate pointer to be returned to the (unprivileged) caller.
+  * It is now initialized to NULL, matching every other wrapper here. */
+ const char * MPU_pcQueueGetNameImpl( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+ {
+ const char * pcReturn = NULL;
+ QueueHandle_t xInternalQueueHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ pcReturn = pcQueueGetName( xInternalQueueHandle );
+ }
+ }
+
+ return pcReturn;
+ }
+
+ #endif /* if configQUEUE_REGISTRY_SIZE > 0 */
+/*-----------------------------------------------------------*/
+
+/* Privileged only wrappers for Queue APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+/*-----------------------------------------------------------*/
+
+ /* Privileged-only wrapper for vQueueDelete. Translates the opaque handle,
+  * deletes the queue, and releases the kernel object pool slot so the
+  * external index can be reused. Unprivileged tasks cannot call this. */
+ void MPU_vQueueDelete( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+ {
+ QueueHandle_t xInternalQueueHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ vQueueDelete( xInternalQueueHandle );
+ /* Free the pool slot only after the queue itself is gone. */
+ MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+ }
+ }
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
+
+ /* Privileged-only wrapper for xQueueCreateMutex. Reserves a pool slot,
+  * creates the mutex, stores the internal handle, and returns the slot's
+  * external index as an opaque handle. Returns NULL if the pool is full or
+  * creation fails (the reserved slot is released in that case). */
+ QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) /* PRIVILEGED_FUNCTION */
+ {
+ QueueHandle_t xInternalQueueHandle = NULL;
+ QueueHandle_t xExternalQueueHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ xInternalQueueHandle = xQueueCreateMutex( ucQueueType );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle );
+ xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ else
+ {
+ /* Creation failed - return the reserved slot to the pool. */
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+
+ return xExternalQueueHandle;
+ }
+
+ #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
+
+ /* Privileged-only wrapper for xQueueCreateMutexStatic. Same slot-reserve /
+  * create / store pattern as MPU_xQueueCreateMutex, using the caller
+  * supplied static buffer. */
+ QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType,
+ StaticQueue_t * pxStaticQueue ) /* PRIVILEGED_FUNCTION */
+ {
+ QueueHandle_t xInternalQueueHandle = NULL;
+ QueueHandle_t xExternalQueueHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ xInternalQueueHandle = xQueueCreateMutexStatic( ucQueueType, pxStaticQueue );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle );
+ xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ else
+ {
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+
+ return xExternalQueueHandle;
+ }
+
+ #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
+
+ /* Privileged-only wrapper for xQueueCreateCountingSemaphore. */
+ QueueHandle_t MPU_xQueueCreateCountingSemaphore( UBaseType_t uxCountValue,
+ UBaseType_t uxInitialCount ) /* PRIVILEGED_FUNCTION */
+ {
+ QueueHandle_t xInternalQueueHandle = NULL;
+ QueueHandle_t xExternalQueueHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ xInternalQueueHandle = xQueueCreateCountingSemaphore( uxCountValue, uxInitialCount );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle );
+ xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ else
+ {
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+
+ return xExternalQueueHandle;
+ }
+
+ #endif /* if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
+
+ /* Privileged-only wrapper for xQueueCreateCountingSemaphoreStatic. */
+ QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
+ const UBaseType_t uxInitialCount,
+ StaticQueue_t * pxStaticQueue ) /* PRIVILEGED_FUNCTION */
+ {
+ QueueHandle_t xInternalQueueHandle = NULL;
+ QueueHandle_t xExternalQueueHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ xInternalQueueHandle = xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle );
+ xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ else
+ {
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+
+ return xExternalQueueHandle;
+ }
+
+ #endif /* if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+
+ /* Privileged-only wrapper for xQueueGenericCreate. */
+ QueueHandle_t MPU_xQueueGenericCreate( UBaseType_t uxQueueLength,
+ UBaseType_t uxItemSize,
+ uint8_t ucQueueType ) /* PRIVILEGED_FUNCTION */
+ {
+ QueueHandle_t xInternalQueueHandle = NULL;
+ QueueHandle_t xExternalQueueHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ xInternalQueueHandle = xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle );
+ xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ else
+ {
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+
+ return xExternalQueueHandle;
+ }
+
+ #endif /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+ /* Privileged-only wrapper for xQueueGenericCreateStatic. */
+ QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
+ const UBaseType_t uxItemSize,
+ uint8_t * pucQueueStorage,
+ StaticQueue_t * pxStaticQueue,
+ const uint8_t ucQueueType ) /* PRIVILEGED_FUNCTION */
+ {
+ QueueHandle_t xInternalQueueHandle = NULL;
+ QueueHandle_t xExternalQueueHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ xInternalQueueHandle = xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle );
+ xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ else
+ {
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+
+ return xExternalQueueHandle;
+ }
+
+ #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+ /* Privileged-only wrapper for xQueueGenericReset. Translates the opaque
+  * external handle to the internal one and resets the queue. Returns pdFAIL
+  * if the handle does not validate.
+  *
+  * Fix: the external handle was cast with ( uint32_t ) before being stored
+  * in the signed int32_t index, unlike every other wrapper in this file
+  * which casts with ( int32_t ). Use ( int32_t ) so an out-of-range value
+  * is not converted through an unsigned intermediate before validation. */
+ BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue,
+ BaseType_t xNewQueue ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ QueueHandle_t xInternalQueueHandle = NULL;
+ BaseType_t xReturn = pdFAIL;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ xReturn = xQueueGenericReset( xInternalQueueHandle, xNewQueue );
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
+
+ /* Privileged-only wrapper for xQueueCreateSet. Reserves a pool slot, creates
+  * the set, and returns the slot's external index as an opaque handle. */
+ QueueSetHandle_t MPU_xQueueCreateSet( UBaseType_t uxEventQueueLength ) /* PRIVILEGED_FUNCTION */
+ {
+ QueueSetHandle_t xInternalQueueSetHandle = NULL;
+ QueueSetHandle_t xExternalQueueSetHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ xInternalQueueSetHandle = xQueueCreateSet( uxEventQueueLength );
+
+ if( xInternalQueueSetHandle != NULL )
+ {
+ MPU_StoreQueueSetHandleAtIndex( lIndex, xInternalQueueSetHandle );
+ xExternalQueueSetHandle = ( QueueSetHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ else
+ {
+ /* Creation failed - return the reserved slot to the pool. */
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+
+ return xExternalQueueSetHandle;
+ }
+
+ #endif /* if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ /* Privileged-only wrapper for xQueueRemoveFromSet. Validates and translates
+  * both opaque handles before calling the real API; pdFAIL otherwise. */
+ BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ QueueSetMemberHandle_t xInternalQueueSetMemberHandle = NULL;
+ QueueSetHandle_t xInternalQueueSetHandle;
+ int32_t lIndexQueueSet, lIndexQueueSetMember;
+
+ lIndexQueueSet = ( int32_t ) xQueueSet;
+ lIndexQueueSetMember = ( int32_t ) xQueueOrSemaphore;
+
+ if( ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE ) &&
+ ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSetMember ) != pdFALSE ) )
+ {
+ xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) );
+ xInternalQueueSetMemberHandle = MPU_GetQueueSetMemberHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSetMember ) );
+
+ if( ( xInternalQueueSetHandle != NULL ) && ( xInternalQueueSetMemberHandle != NULL ) )
+ {
+ xReturn = xQueueRemoveFromSet( xInternalQueueSetMemberHandle, xInternalQueueSetHandle );
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+ /* Privileged-only wrapper for xQueueGenericGetStaticBuffers. The output
+  * pointers are written by the underlying API; no MPU access check is done
+  * here because this wrapper is not reachable from unprivileged code. */
+ BaseType_t MPU_xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
+ uint8_t ** ppucQueueStorage,
+ StaticQueue_t ** ppxStaticQueue ) /* PRIVILEGED_FUNCTION */
+ {
+ int32_t lIndex;
+ QueueHandle_t xInternalQueueHandle = NULL;
+ BaseType_t xReturn = pdFALSE;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ xReturn = xQueueGenericGetStaticBuffers( xInternalQueueHandle, ppucQueueStorage, ppxStaticQueue );
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /*if ( configSUPPORT_STATIC_ALLOCATION == 1 )*/
+/*-----------------------------------------------------------*/
+
+ /* ISR-safe wrapper for xQueueGenericSendFromISR. ISRs run privileged, so
+  * only handle translation is needed - no caller-buffer access checks. */
+ BaseType_t MPU_xQueueGenericSendFromISR( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const BaseType_t xCopyPosition ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ QueueHandle_t xInternalQueueHandle = NULL;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ xReturn = xQueueGenericSendFromISR( xInternalQueueHandle, pvItemToQueue, pxHigherPriorityTaskWoken, xCopyPosition );
+ }
+ }
+
+ return xReturn;
+ }
+
+/*-----------------------------------------------------------*/
+
+ /* ISR-safe wrapper for xQueueGiveFromISR. */
+ BaseType_t MPU_xQueueGiveFromISR( QueueHandle_t xQueue,
+ BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ QueueHandle_t xInternalQueueHandle = NULL;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ xReturn = xQueueGiveFromISR( xInternalQueueHandle, pxHigherPriorityTaskWoken );
+ }
+ }
+
+ return xReturn;
+ }
+
+/*-----------------------------------------------------------*/
+
+ /* ISR-safe wrapper for xQueuePeekFromISR. */
+ BaseType_t MPU_xQueuePeekFromISR( QueueHandle_t xQueue,
+ void * const pvBuffer ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ QueueHandle_t xInternalQueueHandle = NULL;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ xReturn = xQueuePeekFromISR( xInternalQueueHandle, pvBuffer );
+ }
+ }
+
+ return xReturn;
+ }
+
+/*-----------------------------------------------------------*/
+
+ /* ISR-safe wrapper for xQueueReceiveFromISR. */
+ BaseType_t MPU_xQueueReceiveFromISR( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ QueueHandle_t xInternalQueueHandle = NULL;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ xReturn = xQueueReceiveFromISR( xInternalQueueHandle, pvBuffer, pxHigherPriorityTaskWoken );
+ }
+ }
+
+ return xReturn;
+ }
+
+/*-----------------------------------------------------------*/
+
+ /* ISR-safe wrapper for xQueueIsQueueEmptyFromISR. */
+ BaseType_t MPU_xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ QueueHandle_t xInternalQueueHandle = NULL;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ xReturn = xQueueIsQueueEmptyFromISR( xInternalQueueHandle );
+ }
+ }
+
+ return xReturn;
+ }
+/*-----------------------------------------------------------*/
+
+ /* ISR-safe wrapper for xQueueIsQueueFullFromISR. */
+ BaseType_t MPU_xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFAIL;
+ int32_t lIndex;
+ QueueHandle_t xInternalQueueHandle = NULL;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ xReturn = xQueueIsQueueFullFromISR( xInternalQueueHandle );
+ }
+ }
+
+ return xReturn;
+ }
+
+/*-----------------------------------------------------------*/
+
+ /* ISR-safe wrapper for uxQueueMessagesWaitingFromISR. Returns 0 when the
+  * handle does not validate. */
+ UBaseType_t MPU_uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+ {
+ UBaseType_t uxReturn = 0;
+ int32_t lIndex;
+ QueueHandle_t xInternalQueueHandle = NULL;
+
+ lIndex = ( int32_t ) xQueue;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalQueueHandle != NULL )
+ {
+ uxReturn = uxQueueMessagesWaitingFromISR( xInternalQueueHandle );
+ }
+ }
+
+ return uxReturn;
+ }
+
+/*-----------------------------------------------------------*/
+
+ #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+ /* ISR-safe wrapper for xQueueGetMutexHolderFromISR. Translates the opaque
+  * semaphore handle to the internal one, queries the holder, then converts
+  * the holder's internal task handle back to its opaque external index.
+  * Returns NULL when any step fails.
+  *
+  * Fix: this FromISR wrapper previously called the task-level
+  * xQueueGetMutexHolder; it must call the ISR-safe variant
+  * xQueueGetMutexHolderFromISR, which uses interrupt-safe critical
+  * sections. */
+ TaskHandle_t MPU_xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xMutexHolderTaskInternalHandle = NULL;
+ TaskHandle_t xMutexHolderTaskExternalHandle = NULL;
+ int32_t lIndex, lMutexHolderTaskIndex;
+ QueueHandle_t xInternalSemaphoreHandle = NULL;
+
+ lIndex = ( int32_t ) xSemaphore;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalSemaphoreHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalSemaphoreHandle != NULL )
+ {
+ xMutexHolderTaskInternalHandle = xQueueGetMutexHolderFromISR( xInternalSemaphoreHandle );
+
+ if( xMutexHolderTaskInternalHandle != NULL )
+ {
+ lMutexHolderTaskIndex = MPU_GetIndexForTaskHandle( xMutexHolderTaskInternalHandle );
+
+ if( lMutexHolderTaskIndex != -1 )
+ {
+ xMutexHolderTaskExternalHandle = ( TaskHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lMutexHolderTaskIndex ) );
+ }
+ }
+ }
+ }
+
+ return xMutexHolderTaskExternalHandle;
+ }
+
+ #endif /* #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_QUEUE_SETS == 1 )
+
+ /* ISR-safe wrapper for xQueueSelectFromSetFromISR. Translates the opaque
+  * set handle inward, and converts the selected member's internal handle
+  * back to its opaque external index. Returns NULL when any step fails. */
+ QueueSetMemberHandle_t MPU_xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) /* PRIVILEGED_FUNCTION */
+ {
+ QueueSetHandle_t xInternalQueueSetHandle = NULL;
+ QueueSetMemberHandle_t xSelectedMemberInternal = NULL;
+ QueueSetMemberHandle_t xSelectedMemberExternal = NULL;
+ int32_t lIndexQueueSet, lIndexSelectedMember;
+
+ lIndexQueueSet = ( int32_t ) xQueueSet;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE )
+ {
+ xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) );
+
+ if( xInternalQueueSetHandle != NULL )
+ {
+ xSelectedMemberInternal = xQueueSelectFromSetFromISR( xInternalQueueSetHandle );
+
+ if( xSelectedMemberInternal != NULL )
+ {
+ /* Reverse lookup: internal member handle -> pool index -> external handle. */
+ lIndexSelectedMember = MPU_GetIndexForQueueSetMemberHandle( xSelectedMemberInternal );
+
+ if( lIndexSelectedMember != -1 )
+ {
+ xSelectedMemberExternal = ( QueueSetMemberHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lIndexSelectedMember ) );
+ }
+ }
+ }
+ }
+
+ return xSelectedMemberExternal;
+ }
+
+ #endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/*-----------------------------------------------------------*/
+/* MPU wrappers for timers APIs. */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void * MPU_pvTimerGetTimerIDImpl( const TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of pvTimerGetTimerID. Translates the opaque
+  * timer handle; returns NULL when it does not validate. */
+ void * MPU_pvTimerGetTimerIDImpl( const TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+ {
+ void * pvReturn = NULL;
+ TimerHandle_t xInternalTimerHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xTimer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ pvReturn = pvTimerGetTimerID( xInternalTimerHandle );
+ }
+ }
+
+ return pvReturn;
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetTimerIDImpl( TimerHandle_t xTimer,
+ void * pvNewID ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of vTimerSetTimerID. No-op when the opaque
+  * handle does not validate. */
+ void MPU_vTimerSetTimerIDImpl( TimerHandle_t xTimer,
+ void * pvNewID ) /* PRIVILEGED_FUNCTION */
+ {
+ TimerHandle_t xInternalTimerHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xTimer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ vTimerSetTimerID( xInternalTimerHandle, pvNewID );
+ }
+ }
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerIsTimerActiveImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of xTimerIsTimerActive. Returns pdFALSE when
+  * the opaque handle does not validate. */
+ BaseType_t MPU_xTimerIsTimerActiveImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFALSE;
+ TimerHandle_t xInternalTimerHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xTimer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ xReturn = xTimerIsTimerActive( xInternalTimerHandle );
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandleImpl( void ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of xTimerGetTimerDaemonTaskHandle.
+  * NOTE(review): the daemon task handle is returned unconverted (it is not
+  * translated to an external pool index like other task handles here) -
+  * confirm callers expect the raw internal handle. */
+ TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandleImpl( void ) /* PRIVILEGED_FUNCTION */
+ {
+ TaskHandle_t xReturn;
+
+ xReturn = xTimerGetTimerDaemonTaskHandle();
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGenericCommandImpl( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of xTimerGenericCommand. Before dereferencing
+  * the caller supplied pxHigherPriorityTaskWoken, verifies the calling task
+  * is allowed to write that memory; then validates and translates the
+  * opaque timer handle. Returns pdFALSE on any check failure. */
+ BaseType_t MPU_xTimerGenericCommandImpl( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFALSE;
+ TimerHandle_t xInternalTimerHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xIsHigherPriorityTaskWokenWriteable = pdFALSE;
+
+ /* The out-parameter is optional; only check access when it is supplied. */
+ if( pxHigherPriorityTaskWoken != NULL )
+ {
+ xIsHigherPriorityTaskWokenWriteable = xPortIsAuthorizedToAccessBuffer( pxHigherPriorityTaskWoken,
+ sizeof( BaseType_t ),
+ tskMPU_WRITE_PERMISSION );
+ }
+
+ if( ( pxHigherPriorityTaskWoken == NULL ) || ( xIsHigherPriorityTaskWokenWriteable == pdTRUE ) )
+ {
+ lIndex = ( int32_t ) xTimer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ xReturn = xTimerGenericCommand( xInternalTimerHandle, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait );
+ }
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ const char * MPU_pcTimerGetNameImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of pcTimerGetName. Returns NULL when the
+  * opaque handle does not validate. */
+ const char * MPU_pcTimerGetNameImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+ {
+ const char * pcReturn = NULL;
+ TimerHandle_t xInternalTimerHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xTimer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ pcReturn = pcTimerGetName( xInternalTimerHandle );
+ }
+ }
+
+ return pcReturn;
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ void MPU_vTimerSetReloadModeImpl( TimerHandle_t xTimer,
+ const UBaseType_t uxAutoReload ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of vTimerSetReloadMode. No-op when the opaque
+  * handle does not validate. */
+ void MPU_vTimerSetReloadModeImpl( TimerHandle_t xTimer,
+ const UBaseType_t uxAutoReload ) /* PRIVILEGED_FUNCTION */
+ {
+ TimerHandle_t xInternalTimerHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xTimer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ vTimerSetReloadMode( xInternalTimerHandle, uxAutoReload );
+ }
+ }
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ BaseType_t MPU_xTimerGetReloadModeImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of xTimerGetReloadMode. Returns pdFALSE when
+  * the opaque handle does not validate. */
+ BaseType_t MPU_xTimerGetReloadModeImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+ {
+ BaseType_t xReturn = pdFALSE;
+ TimerHandle_t xInternalTimerHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xTimer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ xReturn = xTimerGetReloadMode( xInternalTimerHandle );
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ UBaseType_t MPU_uxTimerGetReloadModeImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of uxTimerGetReloadMode. Returns 0 when the
+  * opaque handle does not validate. */
+ UBaseType_t MPU_uxTimerGetReloadModeImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+ {
+ UBaseType_t uxReturn = 0;
+ TimerHandle_t xInternalTimerHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xTimer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ uxReturn = uxTimerGetReloadMode( xInternalTimerHandle );
+ }
+ }
+
+ return uxReturn;
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetPeriodImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of xTimerGetPeriod. Returns 0 when the opaque
+  * handle does not validate. */
+ TickType_t MPU_xTimerGetPeriodImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+ {
+ TickType_t xReturn = 0;
+ TimerHandle_t xInternalTimerHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xTimer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ xReturn = xTimerGetPeriod( xInternalTimerHandle );
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configUSE_TIMERS == 1 )
+
+ TickType_t MPU_xTimerGetExpiryTimeImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+ /* System call implementation of xTimerGetExpiryTime. Returns 0 when the
+  * opaque handle does not validate. */
+ TickType_t MPU_xTimerGetExpiryTimeImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */
+ {
+ TickType_t xReturn = 0;
+ TimerHandle_t xInternalTimerHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = ( int32_t ) xTimer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ xReturn = xTimerGetExpiryTime( xInternalTimerHandle );
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Privileged only wrappers for Timer APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 )
+
+ /* Privileged-only wrapper for xTimerCreate. Reserves a pool slot and
+  * creates the timer with MPU_TimerCallback substituted as the callback;
+  * the application's pxCallbackFunction is stored alongside the internal
+  * handle so MPU_TimerCallback can dispatch to it later. Returns the
+  * slot's external index as an opaque handle, or NULL on failure. */
+ TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName,
+ const TickType_t xTimerPeriodInTicks,
+ const UBaseType_t uxAutoReload,
+ void * const pvTimerID,
+ TimerCallbackFunction_t pxCallbackFunction ) /* PRIVILEGED_FUNCTION */
+ {
+ TimerHandle_t xInternalTimerHandle = NULL;
+ TimerHandle_t xExternalTimerHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ xInternalTimerHandle = xTimerCreate( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, MPU_TimerCallback );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ MPU_StoreTimerHandleAtIndex( lIndex, xInternalTimerHandle, pxCallbackFunction );
+ xExternalTimerHandle = ( TimerHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ else
+ {
+ /* Creation failed - return the reserved slot to the pool. */
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+
+ return xExternalTimerHandle;
+ }
+
+ #endif /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 )
+
+ /* Privileged-only wrapper for xTimerCreateStatic. Same callback
+  * substitution scheme as MPU_xTimerCreate, using the caller supplied
+  * static buffer. */
+ TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName,
+ const TickType_t xTimerPeriodInTicks,
+ const UBaseType_t uxAutoReload,
+ void * const pvTimerID,
+ TimerCallbackFunction_t pxCallbackFunction,
+ StaticTimer_t * pxTimerBuffer ) /* PRIVILEGED_FUNCTION */
+ {
+ TimerHandle_t xInternalTimerHandle = NULL;
+ TimerHandle_t xExternalTimerHandle = NULL;
+ int32_t lIndex;
+
+ lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+ if( lIndex != -1 )
+ {
+ xInternalTimerHandle = xTimerCreateStatic( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, MPU_TimerCallback, pxTimerBuffer );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ MPU_StoreTimerHandleAtIndex( lIndex, xInternalTimerHandle, pxCallbackFunction );
+ xExternalTimerHandle = ( TimerHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ }
+ else
+ {
+ MPU_SetIndexFreeInKernelObjectPool( lIndex );
+ }
+ }
+
+ return xExternalTimerHandle;
+ }
+
+ #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 )
+
+ /* Privileged-only wrapper for xTimerGetStaticBuffer. Returns pdFALSE when
+  * the opaque handle does not validate. */
+ BaseType_t MPU_xTimerGetStaticBuffer( TimerHandle_t xTimer,
+ StaticTimer_t ** ppxTimerBuffer ) /* PRIVILEGED_FUNCTION */
+ {
+ TimerHandle_t xInternalTimerHandle = NULL;
+ int32_t lIndex;
+ BaseType_t xReturn = pdFALSE;
+
+ lIndex = ( int32_t ) xTimer;
+
+ if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+ {
+ xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+ if( xInternalTimerHandle != NULL )
+ {
+ xReturn = xTimerGetStaticBuffer( xInternalTimerHandle, ppxTimerBuffer );
+ }
+ }
+
+ return xReturn;
+ }
+
+ #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/*-----------------------------------------------------------*/
+/* MPU wrappers for event group APIs. */
+/*-----------------------------------------------------------*/
+
+    EventBits_t MPU_xEventGroupWaitBitsImpl( EventGroupHandle_t xEventGroup,
+                                             const EventBits_t uxBitsToWaitFor,
+                                             const BaseType_t xClearOnExit,
+                                             const BaseType_t xWaitForAllBits,
+                                             TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+    /* System call implementation for xEventGroupWaitBits.  The external
+     * handle is an opaque pool index; it is validated and mapped to the
+     * internal event group handle before the real API runs.  Returns 0 if
+     * the handle is invalid or unmapped. */
+    EventBits_t MPU_xEventGroupWaitBitsImpl( EventGroupHandle_t xEventGroup,
+                                             const EventBits_t uxBitsToWaitFor,
+                                             const BaseType_t xClearOnExit,
+                                             const BaseType_t xWaitForAllBits,
+                                             TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        EventBits_t xReturn = 0;
+        EventGroupHandle_t xInternalEventGroupHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xEventGroup;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalEventGroupHandle != NULL )
+            {
+                xReturn = xEventGroupWaitBits( xInternalEventGroupHandle, uxBitsToWaitFor, xClearOnExit, xWaitForAllBits, xTicksToWait );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    EventBits_t MPU_xEventGroupClearBitsImpl( EventGroupHandle_t xEventGroup,
+                                              const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION;
+
+    /* System call implementation for xEventGroupClearBits.  Validates the
+     * opaque external handle and maps it to the internal event group handle
+     * before calling the real API.  Returns 0 if the handle is invalid. */
+    EventBits_t MPU_xEventGroupClearBitsImpl( EventGroupHandle_t xEventGroup,
+                                              const EventBits_t uxBitsToClear ) /* PRIVILEGED_FUNCTION */
+    {
+        EventBits_t xReturn = 0;
+        EventGroupHandle_t xInternalEventGroupHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xEventGroup;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalEventGroupHandle != NULL )
+            {
+                xReturn = xEventGroupClearBits( xInternalEventGroupHandle, uxBitsToClear );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    EventBits_t MPU_xEventGroupSetBitsImpl( EventGroupHandle_t xEventGroup,
+                                            const EventBits_t uxBitsToSet ) PRIVILEGED_FUNCTION;
+
+    /* System call implementation for xEventGroupSetBits.  Validates the
+     * opaque external handle and maps it to the internal event group handle
+     * before calling the real API.  Returns 0 if the handle is invalid. */
+    EventBits_t MPU_xEventGroupSetBitsImpl( EventGroupHandle_t xEventGroup,
+                                            const EventBits_t uxBitsToSet ) /* PRIVILEGED_FUNCTION */
+    {
+        EventBits_t xReturn = 0;
+        EventGroupHandle_t xInternalEventGroupHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xEventGroup;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalEventGroupHandle != NULL )
+            {
+                xReturn = xEventGroupSetBits( xInternalEventGroupHandle, uxBitsToSet );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    EventBits_t MPU_xEventGroupSyncImpl( EventGroupHandle_t xEventGroup,
+                                         const EventBits_t uxBitsToSet,
+                                         const EventBits_t uxBitsToWaitFor,
+                                         TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+    /* System call implementation for xEventGroupSync.  Validates the opaque
+     * external handle and maps it to the internal event group handle before
+     * calling the real API.  Returns 0 if the handle is invalid. */
+    EventBits_t MPU_xEventGroupSyncImpl( EventGroupHandle_t xEventGroup,
+                                         const EventBits_t uxBitsToSet,
+                                         const EventBits_t uxBitsToWaitFor,
+                                         TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        /* Initialise the return value so a well defined value (0) is
+         * returned when the handle is invalid or unmapped.  Previously this
+         * was left uninitialised, returning indeterminate stack contents on
+         * the early-exit paths - every other wrapper in this file
+         * initialises its return value. */
+        EventBits_t xReturn = 0;
+        EventGroupHandle_t xInternalEventGroupHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xEventGroup;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalEventGroupHandle != NULL )
+            {
+                xReturn = xEventGroupSync( xInternalEventGroupHandle, uxBitsToSet, uxBitsToWaitFor, xTicksToWait );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TRACE_FACILITY == 1 )
+
+        UBaseType_t MPU_uxEventGroupGetNumberImpl( void * xEventGroup ) PRIVILEGED_FUNCTION;
+
+        /* System call implementation for uxEventGroupGetNumber (trace
+         * facility only).  Validates the opaque external handle and maps it
+         * to the internal event group handle first.  Returns 0 if the
+         * handle is invalid. */
+        UBaseType_t MPU_uxEventGroupGetNumberImpl( void * xEventGroup ) /* PRIVILEGED_FUNCTION */
+        {
+            UBaseType_t xReturn = 0;
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    xReturn = uxEventGroupGetNumber( xInternalEventGroupHandle );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+    #if ( configUSE_TRACE_FACILITY == 1 )
+
+        void MPU_vEventGroupSetNumberImpl( void * xEventGroup,
+                                           UBaseType_t uxEventGroupNumber ) PRIVILEGED_FUNCTION;
+
+        /* System call implementation for vEventGroupSetNumber (trace
+         * facility only).  Validates the opaque external handle and maps it
+         * to the internal event group handle first.  Silently does nothing
+         * if the handle is invalid. */
+        void MPU_vEventGroupSetNumberImpl( void * xEventGroup,
+                                           UBaseType_t uxEventGroupNumber ) /* PRIVILEGED_FUNCTION */
+        {
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    vEventGroupSetNumber( xInternalEventGroupHandle, uxEventGroupNumber );
+                }
+            }
+        }
+
+    #endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+/* Privileged only wrappers for Event Group APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xEventGroupCreate.  Reserves a slot in
+         * the kernel object pool, creates the event group, stores the
+         * internal handle in the slot, and hands the caller the opaque
+         * external index.  Returns NULL if the pool is full or creation
+         * fails (the reserved slot is released on failure). */
+        EventGroupHandle_t MPU_xEventGroupCreate( void ) /* PRIVILEGED_FUNCTION */
+        {
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            EventGroupHandle_t xExternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalEventGroupHandle = xEventGroupCreate();
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    MPU_StoreEventGroupHandleAtIndex( lIndex, xInternalEventGroupHandle );
+                    xExternalEventGroupHandle = ( EventGroupHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalEventGroupHandle;
+        }
+
+    #endif /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xEventGroupCreateStatic.  Same pool
+         * bookkeeping as MPU_xEventGroupCreate, but the event group storage
+         * is supplied by the caller.  Returns the opaque external handle,
+         * or NULL on failure. */
+        EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) /* PRIVILEGED_FUNCTION */
+        {
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            EventGroupHandle_t xExternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+            if( lIndex != -1 )
+            {
+                xInternalEventGroupHandle = xEventGroupCreateStatic( pxEventGroupBuffer );
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    MPU_StoreEventGroupHandleAtIndex( lIndex, xInternalEventGroupHandle );
+                    xExternalEventGroupHandle = ( EventGroupHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                }
+                else
+                {
+                    MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                }
+            }
+
+            return xExternalEventGroupHandle;
+        }
+
+    #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+    /* Privileged-only wrapper for vEventGroupDelete.  Validates the opaque
+     * external handle, deletes the underlying event group, and releases the
+     * kernel object pool slot.  NOTE(review): the pool slot is only freed
+     * when the stored handle is non-NULL, whereas MPU_vStreamBufferDelete
+     * frees the slot unconditionally once the index is valid - confirm the
+     * asymmetry is intentional. */
+    void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) /* PRIVILEGED_FUNCTION */
+    {
+        EventGroupHandle_t xInternalEventGroupHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xEventGroup;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalEventGroupHandle != NULL )
+            {
+                vEventGroupDelete( xInternalEventGroupHandle );
+                MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+            }
+        }
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xEventGroupGetStaticBuffer.  Validates
+         * the opaque external handle and maps it to the internal handle
+         * before calling the actual API.  Returns pdFALSE if the handle is
+         * invalid. */
+        BaseType_t MPU_xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup,
+                                                   StaticEventGroup_t ** ppxEventGroupBuffer ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFALSE;
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    xReturn = xEventGroupGetStaticBuffer( xInternalEventGroupHandle, ppxEventGroupBuffer );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )
+
+        /* ISR-safe wrapper for xEventGroupClearBitsFromISR.  Validates the
+         * opaque external handle and maps it to the internal handle before
+         * calling the actual API.  Returns pdFALSE if the handle is
+         * invalid. */
+        BaseType_t MPU_xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup,
+                                                    const EventBits_t uxBitsToClear ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFALSE;
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    xReturn = xEventGroupClearBitsFromISR( xInternalEventGroupHandle, uxBitsToClear );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )
+
+        /* ISR-safe wrapper for xEventGroupSetBitsFromISR.  Validates the
+         * opaque external handle and maps it to the internal handle before
+         * calling the actual API.  Returns pdFALSE if the handle is
+         * invalid. */
+        BaseType_t MPU_xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup,
+                                                  const EventBits_t uxBitsToSet,
+                                                  BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFALSE;
+            EventGroupHandle_t xInternalEventGroupHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xEventGroup;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalEventGroupHandle != NULL )
+                {
+                    xReturn = xEventGroupSetBitsFromISR( xInternalEventGroupHandle, uxBitsToSet, pxHigherPriorityTaskWoken );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+    /* ISR-safe wrapper for xEventGroupGetBitsFromISR.  Validates the opaque
+     * external handle and maps it to the internal handle before calling the
+     * actual API.  Returns 0 if the handle is invalid. */
+    EventBits_t MPU_xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) /* PRIVILEGED_FUNCTION */
+    {
+        EventBits_t xReturn = 0;
+        EventGroupHandle_t xInternalEventGroupHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xEventGroup;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalEventGroupHandle != NULL )
+            {
+                xReturn = xEventGroupGetBitsFromISR( xInternalEventGroupHandle );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+/*-----------------------------------------------------------*/
+/* MPU wrappers for stream buffer APIs. */
+/*-----------------------------------------------------------*/
+
+    size_t MPU_xStreamBufferSendImpl( StreamBufferHandle_t xStreamBuffer,
+                                      const void * pvTxData,
+                                      size_t xDataLengthBytes,
+                                      TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+    /* System call implementation for xStreamBufferSend.  First checks that
+     * the calling task is allowed to READ the supplied data buffer (the
+     * pointer is dereferenced with elevated privilege), then validates the
+     * opaque external handle and maps it to the internal stream buffer
+     * handle.  Returns 0 (no bytes sent) on any validation failure. */
+    size_t MPU_xStreamBufferSendImpl( StreamBufferHandle_t xStreamBuffer,
+                                      const void * pvTxData,
+                                      size_t xDataLengthBytes,
+                                      TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        size_t xReturn = 0;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+        BaseType_t xIsTxDataBufferReadable = pdFALSE;
+
+        xIsTxDataBufferReadable = xPortIsAuthorizedToAccessBuffer( pvTxData,
+                                                                   xDataLengthBytes,
+                                                                   tskMPU_READ_PERMISSION );
+
+        if( xIsTxDataBufferReadable == pdTRUE )
+        {
+            lIndex = ( int32_t ) xStreamBuffer;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalStreamBufferHandle != NULL )
+                {
+                    xReturn = xStreamBufferSend( xInternalStreamBufferHandle, pvTxData, xDataLengthBytes, xTicksToWait );
+                }
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    size_t MPU_xStreamBufferReceiveImpl( StreamBufferHandle_t xStreamBuffer,
+                                         void * pvRxData,
+                                         size_t xBufferLengthBytes,
+                                         TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+    /* System call implementation for xStreamBufferReceive.  First checks
+     * that the calling task is allowed to WRITE to the supplied receive
+     * buffer, then validates the opaque external handle and maps it to the
+     * internal stream buffer handle.  Returns 0 (no bytes received) on any
+     * validation failure. */
+    size_t MPU_xStreamBufferReceiveImpl( StreamBufferHandle_t xStreamBuffer,
+                                         void * pvRxData,
+                                         size_t xBufferLengthBytes,
+                                         TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
+    {
+        size_t xReturn = 0;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+        BaseType_t xIsRxDataBufferWriteable = pdFALSE;
+
+        xIsRxDataBufferWriteable = xPortIsAuthorizedToAccessBuffer( pvRxData,
+                                                                    xBufferLengthBytes,
+                                                                    tskMPU_WRITE_PERMISSION );
+
+        if( xIsRxDataBufferWriteable == pdTRUE )
+        {
+            lIndex = ( int32_t ) xStreamBuffer;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalStreamBufferHandle != NULL )
+                {
+                    xReturn = xStreamBufferReceive( xInternalStreamBufferHandle, pvRxData, xBufferLengthBytes, xTicksToWait );
+                }
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xStreamBufferIsFullImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
+
+    /* System call implementation for xStreamBufferIsFull.  Validates the
+     * opaque external handle and maps it to the internal handle before
+     * calling the actual API.  Returns pdFALSE if the handle is invalid. */
+    BaseType_t MPU_xStreamBufferIsFullImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFALSE;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                xReturn = xStreamBufferIsFull( xInternalStreamBufferHandle );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xStreamBufferIsEmptyImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
+
+    /* System call implementation for xStreamBufferIsEmpty.  Validates the
+     * opaque external handle and maps it to the internal handle before
+     * calling the actual API.  Returns pdFALSE if the handle is invalid. */
+    BaseType_t MPU_xStreamBufferIsEmptyImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFALSE;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                xReturn = xStreamBufferIsEmpty( xInternalStreamBufferHandle );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    size_t MPU_xStreamBufferSpacesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
+
+    /* System call implementation for xStreamBufferSpacesAvailable.
+     * Validates the opaque external handle and maps it to the internal
+     * handle before calling the actual API.  Returns 0 if the handle is
+     * invalid. */
+    size_t MPU_xStreamBufferSpacesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+    {
+        size_t xReturn = 0;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                xReturn = xStreamBufferSpacesAvailable( xInternalStreamBufferHandle );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    size_t MPU_xStreamBufferBytesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
+
+    /* System call implementation for xStreamBufferBytesAvailable.
+     * Validates the opaque external handle and maps it to the internal
+     * handle before calling the actual API.  Returns 0 if the handle is
+     * invalid. */
+    size_t MPU_xStreamBufferBytesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+    {
+        size_t xReturn = 0;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                xReturn = xStreamBufferBytesAvailable( xInternalStreamBufferHandle );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t MPU_xStreamBufferSetTriggerLevelImpl( StreamBufferHandle_t xStreamBuffer,
+                                                     size_t xTriggerLevel ) PRIVILEGED_FUNCTION;
+
+    /* System call implementation for xStreamBufferSetTriggerLevel.
+     * Validates the opaque external handle and maps it to the internal
+     * handle before calling the actual API.  Returns pdFALSE if the handle
+     * is invalid. */
+    BaseType_t MPU_xStreamBufferSetTriggerLevelImpl( StreamBufferHandle_t xStreamBuffer,
+                                                     size_t xTriggerLevel ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFALSE;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                xReturn = xStreamBufferSetTriggerLevel( xInternalStreamBufferHandle, xTriggerLevel );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    size_t MPU_xStreamBufferNextMessageLengthBytesImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
+
+    /* System call implementation for xStreamBufferNextMessageLengthBytes.
+     * Validates the opaque external handle and maps it to the internal
+     * handle before calling the actual API.  Returns 0 if the handle is
+     * invalid. */
+    size_t MPU_xStreamBufferNextMessageLengthBytesImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+    {
+        size_t xReturn = 0;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                xReturn = xStreamBufferNextMessageLengthBytes( xInternalStreamBufferHandle );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+/* Privileged only wrappers for Stream Buffer APIs. These are needed so that
+ * the application can use opaque handles maintained in mpu_wrappers.c
+ * with all the APIs. */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xStreamBufferGenericCreate.  Rejects
+         * application-supplied completion callbacks (disallowed on MPU
+         * ports because they would run with elevated privilege), reserves a
+         * kernel object pool slot, creates the stream buffer, and returns
+         * the opaque external handle - or NULL on any failure. */
+        StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes,
+                                                             size_t xTriggerLevelBytes,
+                                                             BaseType_t xIsMessageBuffer,
+                                                             StreamBufferCallbackFunction_t pxSendCompletedCallback,
+                                                             StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) /* PRIVILEGED_FUNCTION */
+        {
+            StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+            StreamBufferHandle_t xExternalStreamBufferHandle = NULL;
+            int32_t lIndex;
+
+            /**
+             * Stream buffer application level callback functionality is disabled for MPU
+             * enabled ports.
+             */
+            configASSERT( ( pxSendCompletedCallback == NULL ) &&
+                          ( pxReceiveCompletedCallback == NULL ) );
+
+            if( ( pxSendCompletedCallback == NULL ) &&
+                ( pxReceiveCompletedCallback == NULL ) )
+            {
+                lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+                if( lIndex != -1 )
+                {
+                    xInternalStreamBufferHandle = xStreamBufferGenericCreate( xBufferSizeBytes,
+                                                                              xTriggerLevelBytes,
+                                                                              xIsMessageBuffer,
+                                                                              NULL,
+                                                                              NULL );
+
+                    if( xInternalStreamBufferHandle != NULL )
+                    {
+                        MPU_StoreStreamBufferHandleAtIndex( lIndex, xInternalStreamBufferHandle );
+                        xExternalStreamBufferHandle = ( StreamBufferHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                    }
+                    else
+                    {
+                        /* Creation failed - release the reserved pool slot. */
+                        MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                    }
+                }
+            }
+            else
+            {
+                traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer );
+                xExternalStreamBufferHandle = NULL;
+            }
+
+            return xExternalStreamBufferHandle;
+        }
+
+    #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xStreamBufferGenericCreateStatic.
+         * Rejects application-supplied completion callbacks (disallowed on
+         * MPU ports), reserves a kernel object pool slot, creates the
+         * stream buffer from caller-supplied storage, and returns the
+         * opaque external handle - or NULL on any failure. */
+        StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes,
+                                                                   size_t xTriggerLevelBytes,
+                                                                   BaseType_t xIsMessageBuffer,
+                                                                   uint8_t * const pucStreamBufferStorageArea,
+                                                                   StaticStreamBuffer_t * const pxStaticStreamBuffer,
+                                                                   StreamBufferCallbackFunction_t pxSendCompletedCallback,
+                                                                   StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) /* PRIVILEGED_FUNCTION */
+        {
+            StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+            StreamBufferHandle_t xExternalStreamBufferHandle = NULL;
+            int32_t lIndex;
+
+            /**
+             * Stream buffer application level callback functionality is disabled for MPU
+             * enabled ports.
+             */
+            configASSERT( ( pxSendCompletedCallback == NULL ) &&
+                          ( pxReceiveCompletedCallback == NULL ) );
+
+            if( ( pxSendCompletedCallback == NULL ) &&
+                ( pxReceiveCompletedCallback == NULL ) )
+            {
+                lIndex = MPU_GetFreeIndexInKernelObjectPool();
+
+                if( lIndex != -1 )
+                {
+                    xInternalStreamBufferHandle = xStreamBufferGenericCreateStatic( xBufferSizeBytes,
+                                                                                    xTriggerLevelBytes,
+                                                                                    xIsMessageBuffer,
+                                                                                    pucStreamBufferStorageArea,
+                                                                                    pxStaticStreamBuffer,
+                                                                                    NULL,
+                                                                                    NULL );
+
+                    if( xInternalStreamBufferHandle != NULL )
+                    {
+                        MPU_StoreStreamBufferHandleAtIndex( lIndex, xInternalStreamBufferHandle );
+                        xExternalStreamBufferHandle = ( StreamBufferHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+                    }
+                    else
+                    {
+                        /* Creation failed - release the reserved pool slot. */
+                        MPU_SetIndexFreeInKernelObjectPool( lIndex );
+                    }
+                }
+            }
+            else
+            {
+                /* The trace macro previously referenced an undeclared
+                 * variable (xReturn), which failed to compile whenever the
+                 * macro is defined to use its first argument.  Pass the
+                 * (NULL) external handle instead, mirroring the dynamic
+                 * create wrapper above. */
+                traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xExternalStreamBufferHandle, xIsMessageBuffer );
+                xExternalStreamBufferHandle = NULL;
+            }
+
+            return xExternalStreamBufferHandle;
+        }
+
+    #endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+    /* Privileged-only wrapper for vStreamBufferDelete.  Validates the
+     * opaque external handle, deletes the underlying stream buffer if the
+     * slot holds one, then releases the kernel object pool slot.  Note the
+     * slot is freed even when no internal handle was stored. */
+    void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+    {
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                vStreamBufferDelete( xInternalStreamBufferHandle );
+            }
+
+            MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+        }
+    }
+/*-----------------------------------------------------------*/
+
+    /* Privileged-only wrapper for xStreamBufferReset.  Validates the opaque
+     * external handle and maps it to the internal handle before calling the
+     * actual API.  Returns pdFALSE if the handle is invalid. */
+    BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFALSE;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                xReturn = xStreamBufferReset( xInternalStreamBufferHandle );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+
+        /* Privileged-only wrapper for xStreamBufferGetStaticBuffers.
+         * Validates the opaque external handle and maps it to the internal
+         * handle before calling the actual API.  Returns pdFALSE if the
+         * handle is invalid.
+         *
+         * Two fixes relative to the previous revision:
+         * 1. The out-parameters are pointers-to-pointers, matching the
+         *    xStreamBufferGetStaticBuffers API they are forwarded to (the
+         *    old single-pointer declaration could not compile against it).
+         * 2. The call below targets the kernel API, not this wrapper
+         *    itself - the old code recursed into itself unconditionally. */
+        BaseType_t MPU_xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffers,
+                                                      uint8_t ** ppucStreamBufferStorageArea,
+                                                      StaticStreamBuffer_t ** ppxStaticStreamBuffer ) /* PRIVILEGED_FUNCTION */
+        {
+            BaseType_t xReturn = pdFALSE;
+            StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+            int32_t lIndex;
+
+            lIndex = ( int32_t ) xStreamBuffers;
+
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+            {
+                xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+                if( xInternalStreamBufferHandle != NULL )
+                {
+                    xReturn = xStreamBufferGetStaticBuffers( xInternalStreamBufferHandle, ppucStreamBufferStorageArea, ppxStaticStreamBuffer );
+                }
+            }
+
+            return xReturn;
+        }
+
+    #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+    /* ISR-safe wrapper for xStreamBufferSendFromISR.  Validates the opaque
+     * external handle and maps it to the internal handle before calling the
+     * actual API.  Returns 0 if the handle is invalid. */
+    size_t MPU_xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
+                                         const void * pvTxData,
+                                         size_t xDataLengthBytes,
+                                         BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+    {
+        size_t xReturn = 0;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                xReturn = xStreamBufferSendFromISR( xInternalStreamBufferHandle, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    /* ISR-safe wrapper for xStreamBufferReceiveFromISR.  Validates the
+     * opaque external handle and maps it to the internal handle before
+     * calling the actual API.  Returns 0 if the handle is invalid. */
+    size_t MPU_xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
+                                            void * pvRxData,
+                                            size_t xBufferLengthBytes,
+                                            BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+    {
+        size_t xReturn = 0;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                xReturn = xStreamBufferReceiveFromISR( xInternalStreamBufferHandle, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    /* ISR-safe wrapper for xStreamBufferSendCompletedFromISR.  Validates
+     * the opaque external handle and maps it to the internal handle before
+     * calling the actual API.  Returns pdFALSE if the handle is invalid. */
+    BaseType_t MPU_xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer,
+                                                      BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFALSE;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                xReturn = xStreamBufferSendCompletedFromISR( xInternalStreamBufferHandle, pxHigherPriorityTaskWoken );
+            }
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    /* ISR-safe wrapper for xStreamBufferReceiveCompletedFromISR.  Validates
+     * the opaque external handle and maps it to the internal handle before
+     * calling the actual API.  Returns pdFALSE if the handle is invalid. */
+    BaseType_t MPU_xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer,
+                                                         BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */
+    {
+        BaseType_t xReturn = pdFALSE;
+        StreamBufferHandle_t xInternalStreamBufferHandle = NULL;
+        int32_t lIndex;
+
+        lIndex = ( int32_t ) xStreamBuffer;
+
+        if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+        {
+            xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
+
+            if( xInternalStreamBufferHandle != NULL )
+            {
+                xReturn = xStreamBufferReceiveCompletedFromISR( xInternalStreamBufferHandle, pxHigherPriorityTaskWoken );
+            }
+        }
+
+        return xReturn;
+    }
+
+/*-----------------------------------------------------------*/
+
+/* Functions that the application writer wants to execute in privileged mode
+ * can be defined in application_defined_privileged_functions.h. */
+
+ #if configINCLUDE_APPLICATION_DEFINED_PRIVILEGED_FUNCTIONS == 1
+ #include "application_defined_privileged_functions.h"
+ #endif
+/*-----------------------------------------------------------*/
+
+#endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
+/*-----------------------------------------------------------*/
diff --git a/portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..a1e5ce0
--- /dev/null
+++ b/portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2419 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for ulTaskGenericNotifyTake: dispatches to
+ * MPU_ulTaskGenericNotifyTakeImpl. A privileged caller (CONTROL bit 0
+ * clear) tail-branches straight to the Impl; an unprivileged caller
+ * raises SVC portSVC_SYSTEM_CALL_ENTER to enter the system call, calls
+ * the Impl, then drops privilege via SVC portSVC_SYSTEM_CALL_EXIT.
+ * r0/r1 are pushed/popped around the check to preserve the arguments. */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTaskGenericNotifyStateClear: dispatches to
+ * MPU_xTaskGenericNotifyStateClearImpl. Privileged callers (CONTROL
+ * bit 0 clear) tail-branch straight to the Impl; unprivileged callers
+ * enter a system call via SVC portSVC_SYSTEM_CALL_ENTER and drop
+ * privilege via SVC portSVC_SYSTEM_CALL_EXIT afterwards. r0/r1 are
+ * saved/restored around the privilege check. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for ulTaskGenericNotifyValueClear: dispatches to
+ * MPU_ulTaskGenericNotifyValueClearImpl. Privileged callers (CONTROL
+ * bit 0 clear) tail-branch straight to the Impl; unprivileged callers
+ * enter a system call via SVC portSVC_SYSTEM_CALL_ENTER and drop
+ * privilege via SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored
+ * around the privilege check. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueGenericSend: dispatches to
+ * MPU_xQueueGenericSendImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check so argument registers reach the Impl intact. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for uxQueueMessagesWaiting: dispatches to
+ * MPU_uxQueueMessagesWaitingImpl. Privileged callers (CONTROL bit 0
+ * clear) tail-branch straight to the Impl; unprivileged callers enter a
+ * system call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for uxQueueSpacesAvailable: dispatches to
+ * MPU_uxQueueSpacesAvailableImpl. Privileged callers (CONTROL bit 0
+ * clear) tail-branch straight to the Impl; unprivileged callers enter a
+ * system call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueReceive: dispatches to
+ * MPU_xQueueReceiveImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueuePeek: dispatches to MPU_xQueuePeekImpl.
+ * Privileged callers (CONTROL bit 0 clear) tail-branch straight to the
+ * Impl; unprivileged callers enter a system call via
+ * SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueSemaphoreTake: dispatches to
+ * MPU_xQueueSemaphoreTakeImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueGetMutexHolder: dispatches to
+ * MPU_xQueueGetMutexHolderImpl. Privileged callers (CONTROL bit 0
+ * clear) tail-branch straight to the Impl; unprivileged callers enter a
+ * system call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueTakeMutexRecursive: dispatches to
+ * MPU_xQueueTakeMutexRecursiveImpl. Privileged callers (CONTROL bit 0
+ * clear) tail-branch straight to the Impl; unprivileged callers enter a
+ * system call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueGiveMutexRecursive: dispatches to
+ * MPU_xQueueGiveMutexRecursiveImpl. Privileged callers (CONTROL bit 0
+ * clear) tail-branch straight to the Impl; unprivileged callers enter a
+ * system call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueSelectFromSet: dispatches to
+ * MPU_xQueueSelectFromSetImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSelectFromSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueAddToSet: dispatches to
+ * MPU_xQueueAddToSetImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueAddToSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for vQueueAddToRegistry: dispatches to
+ * MPU_vQueueAddToRegistryImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueAddToRegistryImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for vQueueUnregisterQueue: dispatches to
+ * MPU_vQueueUnregisterQueueImpl. Privileged callers (CONTROL bit 0
+ * clear) tail-branch straight to the Impl; unprivileged callers enter a
+ * system call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueUnregisterQueueImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for pcQueueGetName: dispatches to
+ * MPU_pcQueueGetNameImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pcQueueGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for pvTimerGetTimerID: dispatches to
+ * MPU_pvTimerGetTimerIDImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pvTimerGetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for vTimerSetTimerID: dispatches to
+ * MPU_vTimerSetTimerIDImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTimerIsTimerActive: dispatches to
+ * MPU_xTimerIsTimerActiveImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerIsTimerActiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTimerGetTimerDaemonTaskHandle: dispatches to
+ * MPU_xTimerGetTimerDaemonTaskHandleImpl. Privileged callers (CONTROL
+ * bit 0 clear) tail-branch straight to the Impl; unprivileged callers
+ * enter a system call via SVC portSVC_SYSTEM_CALL_ENTER and drop
+ * privilege via SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored
+ * around the privilege check. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTimerGenericCommand: dispatches to
+ * MPU_xTimerGenericCommandImpl. Unlike the other wrappers, this one
+ * first reads IPSR: a non-zero IPSR means we are running in an
+ * exception/interrupt context, which is always privileged, so branch
+ * straight to the Impl without raising an SVC (SVC from a handler would
+ * fault). Otherwise the usual CONTROL bit 0 test selects the path:
+ * privileged threads branch to the Impl; unprivileged threads enter a
+ * system call via SVC portSVC_SYSTEM_CALL_ENTER_1 (the _1 variant —
+ * this API takes 5 arguments, so one is passed on the stack) and drop
+ * privilege via SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored
+ * around the checks. */
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGenericCommandImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for pcTimerGetName: dispatches to
+ * MPU_pcTimerGetNameImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pcTimerGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for vTimerSetReloadMode: dispatches to
+ * MPU_vTimerSetReloadModeImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTimerGetReloadMode: dispatches to
+ * MPU_xTimerGetReloadModeImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for uxTimerGetReloadMode: dispatches to
+ * MPU_uxTimerGetReloadModeImpl. Privileged callers (CONTROL bit 0
+ * clear) tail-branch straight to the Impl; unprivileged callers enter a
+ * system call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTimerGetPeriod: dispatches to
+ * MPU_xTimerGetPeriodImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetPeriodImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTimerGetExpiryTime: dispatches to
+ * MPU_xTimerGetExpiryTimeImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetExpiryTimeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xEventGroupWaitBits: dispatches to
+ * MPU_xEventGroupWaitBitsImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER_1 (the _1 variant — this API
+ * takes 5 arguments, so one is passed on the stack) and drop privilege
+ * via SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupWaitBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xEventGroupClearBits: dispatches to
+ * MPU_xEventGroupClearBitsImpl. Privileged callers (CONTROL bit 0
+ * clear) tail-branch straight to the Impl; unprivileged callers enter a
+ * system call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupClearBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xEventGroupSetBits: dispatches to
+ * MPU_xEventGroupSetBitsImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSetBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xEventGroupSync: dispatches to
+ * MPU_xEventGroupSyncImpl. Privileged callers (CONTROL bit 0 clear)
+ * tail-branch straight to the Impl; unprivileged callers enter a system
+ * call via SVC portSVC_SYSTEM_CALL_ENTER and drop privilege via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0/r1 are saved/restored around the
+ * privilege check. */
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSyncImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxEventGroupGetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve r0/r1 - they carry caller arguments. */
+ " mrs r0, control \n" /* r0 = CONTROL register. */
+ " movs r1, #1 \n" /* r1 = nPRIV bit mask. */
+ " tst r0, r1 \n" /* nPRIV set ==> caller is unprivileged. */
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " b MPU_vEventGroupSetNumberImpl \n" /* Already privileged - tail call the implementation directly. */
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " svc %0 \n" /* portSVC_SYSTEM_CALL_ENTER - handled by the SVC handler. */
+ " bl MPU_vEventGroupSetNumberImpl \n"
+ " svc %1 \n" /* portSVC_SYSTEM_CALL_EXIT. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve r0/r1 - they carry caller arguments. */
+ " mrs r0, control \n" /* r0 = CONTROL register. */
+ " movs r1, #1 \n" /* r1 = nPRIV bit mask. */
+ " tst r0, r1 \n" /* nPRIV set ==> caller is unprivileged. */
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " b MPU_xStreamBufferSendImpl \n" /* Already privileged - tail call the implementation directly. */
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " svc %0 \n" /* portSVC_SYSTEM_CALL_ENTER - handled by the SVC handler. */
+ " bl MPU_xStreamBufferSendImpl \n"
+ " svc %1 \n" /* portSVC_SYSTEM_CALL_EXIT. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve r0/r1 - they carry caller arguments. */
+ " mrs r0, control \n" /* r0 = CONTROL register. */
+ " movs r1, #1 \n" /* r1 = nPRIV bit mask. */
+ " tst r0, r1 \n" /* nPRIV set ==> caller is unprivileged. */
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " b MPU_xStreamBufferReceiveImpl \n" /* Already privileged - tail call the implementation directly. */
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " svc %0 \n" /* portSVC_SYSTEM_CALL_ENTER - handled by the SVC handler. */
+ " bl MPU_xStreamBufferReceiveImpl \n"
+ " svc %1 \n" /* portSVC_SYSTEM_CALL_EXIT. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve r0/r1 - they carry caller arguments. */
+ " mrs r0, control \n" /* r0 = CONTROL register. */
+ " movs r1, #1 \n" /* r1 = nPRIV bit mask. */
+ " tst r0, r1 \n" /* nPRIV set ==> caller is unprivileged. */
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " b MPU_xStreamBufferIsFullImpl \n" /* Already privileged - tail call the implementation directly. */
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " svc %0 \n" /* portSVC_SYSTEM_CALL_ENTER - handled by the SVC handler. */
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n" /* portSVC_SYSTEM_CALL_EXIT. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve r0/r1 - they carry caller arguments. */
+ " mrs r0, control \n" /* r0 = CONTROL register. */
+ " movs r1, #1 \n" /* r1 = nPRIV bit mask. */
+ " tst r0, r1 \n" /* nPRIV set ==> caller is unprivileged. */
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " b MPU_xStreamBufferIsEmptyImpl \n" /* Already privileged - tail call the implementation directly. */
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " svc %0 \n" /* portSVC_SYSTEM_CALL_ENTER - handled by the SVC handler. */
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n" /* portSVC_SYSTEM_CALL_EXIT. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve r0/r1 - they carry caller arguments. */
+ " mrs r0, control \n" /* r0 = CONTROL register. */
+ " movs r1, #1 \n" /* r1 = nPRIV bit mask. */
+ " tst r0, r1 \n" /* nPRIV set ==> caller is unprivileged. */
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " b MPU_xStreamBufferSpacesAvailableImpl \n" /* Already privileged - tail call the implementation directly. */
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " svc %0 \n" /* portSVC_SYSTEM_CALL_ENTER - handled by the SVC handler. */
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n" /* portSVC_SYSTEM_CALL_EXIT. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve r0/r1 - they carry caller arguments. */
+ " mrs r0, control \n" /* r0 = CONTROL register. */
+ " movs r1, #1 \n" /* r1 = nPRIV bit mask. */
+ " tst r0, r1 \n" /* nPRIV set ==> caller is unprivileged. */
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " b MPU_xStreamBufferBytesAvailableImpl \n" /* Already privileged - tail call the implementation directly. */
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " svc %0 \n" /* portSVC_SYSTEM_CALL_ENTER - handled by the SVC handler. */
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n" /* portSVC_SYSTEM_CALL_EXIT. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve r0/r1 - they carry caller arguments. */
+ " mrs r0, control \n" /* r0 = CONTROL register. */
+ " movs r1, #1 \n" /* r1 = nPRIV bit mask. */
+ " tst r0, r1 \n" /* nPRIV set ==> caller is unprivileged. */
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n" /* Already privileged - tail call the implementation directly. */
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " svc %0 \n" /* portSVC_SYSTEM_CALL_ENTER - handled by the SVC handler. */
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n" /* portSVC_SYSTEM_CALL_EXIT. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0, r1} \n" /* Preserve r0/r1 - they carry caller arguments. */
+ " mrs r0, control \n" /* r0 = CONTROL register. */
+ " movs r1, #1 \n" /* r1 = nPRIV bit mask. */
+ " tst r0, r1 \n" /* nPRIV set ==> caller is unprivileged. */
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" /* Already privileged - tail call the implementation directly. */
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0, r1} \n" /* Restore caller arguments. */
+ " svc %0 \n" /* portSVC_SYSTEM_CALL_ENTER - handled by the SVC handler. */
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n" /* portSVC_SYSTEM_CALL_EXIT. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/GCC/ARM_CM23/non_secure/port.c b/portable/GCC/ARM_CM23/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/GCC/ARM_CM23/non_secure/port.c
+++ b/portable/GCC/ARM_CM23/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; /* PC at the time the SVC was raised. */
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8; /* Standard frame - R0-R3, R12, LR, PC, xPSR. */
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; /* PC at the time the SVC was raised. */
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8; /* Standard frame - R0-R3, R12, LR, PC, xPSR. */
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; /* Skip the padding word on the task stack. */
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; /* PC at the time the SVC was raised. */
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; /* Saved by vSystemCallEnter. */
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8; /* Standard frame - R0-R3, R12, LR, PC, xPSR. */
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ /* The privilege level is recorded in the task's MPU flags by
+ * pxPortInitialiseStack. */
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0; /* Index into the ulContext array in the task's MPU settings. */
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; /* Record the privilege level for xPortIsTaskPrivileged. */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE; /* A privileged task is granted access without consulting the MPU regions. */
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) /* Reject buffers that would wrap the 32-bit address space. NOTE(review): assumes ulBufferLength >= 1 - a zero length wraps ( ulBufferLength - 1UL ) - confirm callers never pass 0. */
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); /* Inclusive end address of the buffer. */
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) /* Access is granted only if the whole buffer lies within a single enabled region that permits the requested access. */
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/GCC/ARM_CM23/non_secure/portasm.c b/portable/GCC/ARM_CM23/non_secure/portasm.c
index 44f159a..64a24f5 100644
--- a/portable/GCC/ARM_CM23/non_secure/portasm.c
+++ b/portable/GCC/ARM_CM23/non_secure/portasm.c
@@ -44,6 +44,109 @@
#error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
#endif
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */
+ " movs r3, #5 \n" /* r3 = 5. */
+ " str r3, [r1] \n" /* Program RNR = 5. */
+ " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */
+ " movs r3, #6 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 6. */
+ " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */
+ " movs r3, #7 \n" /* r3 = 7. */
+ " str r3, [r1] \n" /* Program RNR = 7. */
+ " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " subs r2, #20 \n" /* Move r2 back to the start of the five saved special registers ( 5 * 4 = 20 bytes ). */
+ " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */
+ " subs r2, #20 \n" /* Rewind r2 as the ldmia above moved it forward past the five words just read. */
+ " msr psp, r3 \n" /* Restore PSP. */
+ " msr psplim, r4 \n" /* Restore PSPLIM. */
+ " msr control, r5 \n" /* Restore CONTROL. */
+ " mov lr, r6 \n" /* Restore LR (EXC_RETURN). */
+ " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " subs r2, #32 \n" /* Go back 8 words to the start of the hardware saved context. */
+ " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */
+ " stmia r3!, {r4-r7} \n" /* Copy half of the hardware saved context on the task stack. */
+ " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */
+ " stmia r3!, {r4-r7} \n" /* Copy rest half of the hardware saved context on the task stack. */
+ " subs r2, #48 \n" /* Go back 12 words to the start of the saved r8-r11 block. */
+ " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " subs r2, #32 \n" /* Go back 8 words to the start of the saved r4-r7 block. */
+ " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */
+ " subs r2, #16 \n" /* r2 now points to the base of the saved context in the TCB. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xSecureContextConst2: .word xSecureContext \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -54,83 +157,24 @@
" ldr r3, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r5, #1 \n"/* r5 = 1. */
- " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- " movs r5, #4 \n"/* r5 = 4. */
- " str r5, [r2] \n"/* Program RNR = 4. */
- " ldmia r3!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */
- " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */
- " stmia r4!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */
- " movs r5, #5 \n"/* r5 = 5. */
- " str r5, [r2] \n"/* Program RNR = 5. */
- " ldmia r3!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */
- " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */
- " stmia r4!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */
- " movs r5, #6 \n"/* r5 = 6. */
- " str r5, [r2] \n"/* Program RNR = 6. */
- " ldmia r3!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. */
- " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */
- " stmia r4!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */
- " movs r5, #7 \n"/* r5 = 7. */
- " str r5, [r2] \n"/* Program RNR = 7. */
- " ldmia r3!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */
- " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */
- " stmia r4!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r5, #1 \n"/* r5 = 1. */
- " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- " ldr r5, xSecureContextConst2 \n"
- " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " msr control, r3 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " bx r4 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
- " ldr r4, xSecureContextConst2 \n"
- " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
+ " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ " ldr r4, xSecureContextConst2 \n"
+ " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
+ " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
+ " movs r1, #2 \n"/* r1 = 2. */
+ " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n"/* Discard everything up to r0. */
+ " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " bx r3 \n"/* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -237,6 +281,167 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/
+ " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " save_s_context: \n"
+ " push {r0-r2, lr} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r3} \n" /* LR is now in r3. */
+ " mov lr, r3 \n" /* Restore LR. */
+ " \n"
+ " save_ns_context: \n"
+ " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */
+ " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " save_general_regs: \n"
+ " mrs r3, psp \n" /* r3 = PSP - the task stack holding the hardware saved context. */
+ " stmia r2!, {r4-r7} \n" /* Store r4-r7. */
+ " mov r4, r8 \n" /* r4 = r8. */
+ " mov r5, r9 \n" /* r5 = r9. */
+ " mov r6, r10 \n" /* r6 = r10. */
+ " mov r7, r11 \n" /* r7 = r11. */
+ " stmia r2!, {r4-r7} \n" /* Store r8-r11. */
+ " ldmia r3!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */
+ " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */
+ " ldmia r3!, {r4-r7} \n" /* Copy rest half of the hardware saved context into r4-r7. */
+ " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psp \n" /* r3 = PSP. */
+ " mrs r4, psplim \n" /* r4 = PSPLIM. */
+ " mrs r5, control \n" /* r5 = CONTROL. */
+ " mov r6, lr \n" /* r6 = LR. */
+ " stmia r2!, {r0, r3-r6} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " cpsid i \n"
+ " bl vTaskSwitchContext \n"
+ " cpsie i \n"
+ " \n"
+ " program_mpu: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */
+ " movs r3, #5 \n" /* r3 = 5. */
+ " str r3, [r1] \n" /* Program RNR = 5. */
+ " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */
+ " movs r3, #6 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 6. */
+ " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */
+ " movs r3, #7 \n" /* r3 = 7. */
+ " str r3, [r1] \n" /* Program RNR = 7. */
+ " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " subs r2, #20 \n" /* Move r2 back to the start of the five saved special registers ( 5 * 4 = 20 bytes ). */
+ " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */
+ " subs r2, #20 \n" /* Rewind r2 as the ldmia above moved it forward past the five words just read. */
+ " msr psp, r3 \n" /* Restore PSP. */
+ " msr psplim, r4 \n" /* Restore PSPLIM. */
+ " msr control, r5 \n" /* Restore CONTROL. */
+ " mov lr, r6 \n" /* Restore LR (EXC_RETURN). */
+ " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* No secure context to restore. */
+ " \n"
+ " restore_s_context: \n"
+ " push {r1-r3, lr} \n"
+ " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r1-r4} \n" /* LR is now in r4. */
+ " mov lr, r4 \n"
+ " \n"
+ " restore_ns_context: \n"
+ " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */
+ " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " restore_general_regs: \n"
+ " subs r2, #32 \n" /* Go back 8 words to the start of the hardware saved context. */
+ " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */
+ " stmia r3!, {r4-r7} \n" /* Copy half of the hardware saved context on the task stack. */
+ " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */
+ " stmia r3!, {r4-r7} \n" /* Copy rest half of the hardware saved context on the task stack. */
+ " subs r2, #48 \n" /* Go back 12 words to the start of the saved r8-r11 block. */
+ " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " subs r2, #32 \n" /* Go back 8 words to the start of the saved r4-r7 block. */
+ " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */
+ " subs r2, #16 \n" /* r2 now points to the base of the saved context in the TCB. */
+ " \n"
+ " restore_context_done: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xSecureContextConst: .word xSecureContext \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -260,52 +465,26 @@
" bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #16 \n"/* r2 = r2 + 16. */
- " stmia r2!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */
- " mov r4, r8 \n"/* r4 = r8. */
- " mov r5, r9 \n"/* r5 = r9. */
- " mov r6, r10 \n"/* r6 = r10. */
- " mov r7, r11 \n"/* r7 = r11. */
- " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " subs r2, r2, #48 \n"/* r2 = r2 - 48. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
- " mov r4, r8 \n"/* r4 = r8. */
- " mov r5, r9 \n"/* r5 = r9. */
- " mov r6, r10 \n"/* r6 = r10. */
- " mov r7, r11 \n"/* r7 = r11. */
- " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
+ " mov r4, r8 \n"/* r4 = r8. */
+ " mov r5, r9 \n"/* r5 = r9. */
+ " mov r6, r10 \n"/* r6 = r10. */
+ " mov r7, r11 \n"/* r7 = r11. */
+ " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
" \n"
" select_next_task: \n"
" cpsid i \n"
@@ -316,85 +495,22 @@
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " movs r5, #1 \n"/* r5 = 1. */
- " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
- " str r4, [r3] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r3] \n"/* Program MAIR0. */
- " ldr r4, xRNRConst \n"/* r4 = 0xe000ed98 [Location of RNR]. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " movs r5, #4 \n"/* r5 = 4. */
- " str r5, [r4] \n"/* Program RNR = 4. */
- " ldmia r1!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */
- " movs r5, #5 \n"/* r5 = 5. */
- " str r5, [r4] \n"/* Program RNR = 5. */
- " ldmia r1!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */
- " movs r5, #6 \n"/* r5 = 6. */
- " str r5, [r4] \n"/* Program RNR = 6. */
- " ldmia r1!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */
- " movs r5, #7 \n"/* r5 = 7. */
- " str r5, [r4] \n"/* Program RNR = 7. */
- " ldmia r1!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */
- " \n"
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " movs r5, #1 \n"/* r5 = 1. */
- " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
- " str r4, [r3] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r3 \n"/* Restore the CONTROL register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #else /* configENABLE_MPU */
- " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #endif /* configENABLE_MPU */
+ " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
+ " mov lr, r4 \n"/* LR = r4. */
+ " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r3] \n"/* Restore the task's xSecureContext. */
+ " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
+ " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
+ " push {r2, r4} \n"
+ " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r2, r4} \n"
+ " mov lr, r4 \n"/* LR = r4. */
+ " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " msr psp, r2 \n"/* Remember the new top of stack for the task. */
+ " bx lr \n"
" \n"
" restore_ns_context: \n"
" adds r2, r2, #16 \n"/* Move to the high registers. */
@@ -411,16 +527,62 @@
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xSecureContextConst: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "movs r0, #4 \n"
+ "mov r1, lr \n"
+ "tst r0, r1 \n"
+ "beq stack_on_msp \n"
+ "stack_on_psp: \n"
+ " mrs r0, psp \n"
+ " b route_svc \n"
+ "stack_on_msp: \n"
+ " mrs r0, msp \n"
+ " b route_svc \n"
+ " \n"
+ "route_svc: \n"
+ " ldr r2, [r0, #24] \n"
+ " subs r2, #2 \n"
+ " ldrb r3, [r2, #0] \n"
+ " cmp r3, %0 \n"
+ " beq system_call_enter \n"
+ " cmp r3, %1 \n"
+ " beq system_call_enter_1 \n"
+ " cmp r3, %2 \n"
+ " beq system_call_exit \n"
+ " b vPortSVCHandler_C \n"
+ " \n"
+ "system_call_enter: \n"
+ " b vSystemCallEnter \n"
+ "system_call_enter_1: \n"
+ " b vSystemCallEnter_1 \n"
+ "system_call_exit: \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "r3", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -443,6 +605,8 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
diff --git a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h
+++ b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..a1e5ce0
--- /dev/null
+++ b/portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2419 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n"
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+/* MPU wrapper for uxTaskGetStackHighWaterMark2(). Privileged callers
+ * (CONTROL bit 0 clear) branch directly to the Impl; unprivileged callers
+ * raise SVC portSVC_SYSTEM_CALL_ENTER, call the Impl on the privileged
+ * system-call path, then raise portSVC_SYSTEM_CALL_EXIT. */
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+/* MPU wrapper for xTaskGetCurrentTaskHandle(). Privileged callers (CONTROL
+ * bit 0 clear) branch directly to the Impl; unprivileged callers enter via
+ * SVC portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+/* MPU wrapper for xTaskGetSchedulerState(). Privileged callers (CONTROL
+ * bit 0 clear) branch directly to the Impl; unprivileged callers enter via
+ * SVC portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for vTaskSetTimeOutState(). Privileged callers (CONTROL
+ * bit 0 clear) branch directly to the Impl; unprivileged callers enter via
+ * SVC portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xTaskCheckForTimeOut(). Privileged callers (CONTROL
+ * bit 0 clear) branch directly to the Impl; unprivileged callers enter via
+ * SVC portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for xTaskGenericNotify(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER_1 and exit via portSVC_SYSTEM_CALL_EXIT.
+ * NOTE(review): the _1 entry variant is used here (unlike the 4-argument
+ * wrappers) — presumably because the fifth argument is passed on the stack;
+ * confirm against the port's SVC handler. */
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for xTaskGenericNotifyWait(). Privileged callers (CONTROL
+ * bit 0 clear) branch directly to the Impl; unprivileged callers enter via
+ * SVC portSVC_SYSTEM_CALL_ENTER_1 and exit via portSVC_SYSTEM_CALL_EXIT.
+ * NOTE(review): uses the _1 entry variant like the other five-argument
+ * wrappers — presumably for the stack-passed fifth argument; confirm
+ * against the port's SVC handler. */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for ulTaskGenericNotifyTake(). Privileged callers (CONTROL
+ * bit 0 clear) branch directly to the Impl; unprivileged callers enter via
+ * SVC portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for xTaskGenericNotifyStateClear(). Privileged callers
+ * (CONTROL bit 0 clear) branch directly to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER and exit via
+ * portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for ulTaskGenericNotifyValueClear(). Privileged callers
+ * (CONTROL bit 0 clear) branch directly to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER and exit via
+ * portSVC_SYSTEM_CALL_EXIT. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueueGenericSend(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for uxQueueMessagesWaiting(). Privileged callers (CONTROL
+ * bit 0 clear) branch directly to the Impl; unprivileged callers enter via
+ * SVC portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for uxQueueSpacesAvailable(). Privileged callers (CONTROL
+ * bit 0 clear) branch directly to the Impl; unprivileged callers enter via
+ * SVC portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueueReceive(). Privileged callers (CONTROL bit 0 clear)
+ * branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueuePeek(). Privileged callers (CONTROL bit 0 clear)
+ * branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueueSemaphoreTake(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+/* MPU wrapper for xQueueGetMutexHolder(). Privileged callers (CONTROL
+ * bit 0 clear) branch directly to the Impl; unprivileged callers enter via
+ * SVC portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* MPU wrapper for xQueueTakeMutexRecursive(). Privileged callers (CONTROL
+ * bit 0 clear) branch directly to the Impl; unprivileged callers enter via
+ * SVC portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* MPU wrapper for xQueueGiveMutexRecursive(). Privileged callers (CONTROL
+ * bit 0 clear) branch directly to the Impl; unprivileged callers enter via
+ * SVC portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* MPU wrapper for xQueueSelectFromSet(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSelectFromSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* MPU wrapper for xQueueAddToSet(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueAddToSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* MPU wrapper for vQueueAddToRegistry(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueAddToRegistryImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* MPU wrapper for vQueueUnregisterQueue(). Privileged callers (CONTROL
+ * bit 0 clear) branch directly to the Impl; unprivileged callers enter via
+ * SVC portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueUnregisterQueueImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* MPU wrapper for pcQueueGetName(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pcQueueGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for pvTimerGetTimerID(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pvTimerGetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for vTimerSetTimerID(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerIsTimerActive(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerIsTimerActiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerGetTimerDaemonTaskHandle(). Privileged callers
+ * (CONTROL bit 0 clear) branch directly to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER and exit via
+ * portSVC_SYSTEM_CALL_EXIT. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerGenericCommand(). Unlike the other wrappers this
+ * one first reads IPSR: a non-zero IPSR means the caller is running in an
+ * exception/interrupt handler, which takes the direct (privileged) path to
+ * the Impl regardless of CONTROL. A thread-mode caller then takes the
+ * direct path when CONTROL bit 0 is clear (privileged), or enters via SVC
+ * portSVC_SYSTEM_CALL_ENTER_1 and exits via portSVC_SYSTEM_CALL_EXIT when
+ * unprivileged. NOTE(review): the _1 entry variant matches the other
+ * five-argument wrappers — presumably for the stack-passed fifth argument;
+ * confirm against the port's SVC handler. */
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, ipsr \n" /* non-zero => handler (interrupt) context */
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n" /* bit 0 clear => privileged thread mode */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGenericCommandImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for pcTimerGetName(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_pcTimerGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for vTimerSetReloadMode(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerGetReloadMode(). Privileged callers (CONTROL bit 0
+ * clear) branch directly to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER and exit via portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* bit 0 set => unprivileged */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for uxTimerGetReloadMode. Privileged callers
+ * (CONTROL.nPRIV == 0) branch straight to the Impl function; unprivileged
+ * callers bracket the Impl call with SVC portSVC_SYSTEM_CALL_ENTER /
+ * portSVC_SYSTEM_CALL_EXIT so it runs on the privileged system call stack. */
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xTimerGetPeriod. Privileged callers
+ * (CONTROL.nPRIV == 0) branch straight to the Impl function; unprivileged
+ * callers bracket the Impl call with SVC portSVC_SYSTEM_CALL_ENTER /
+ * portSVC_SYSTEM_CALL_EXIT so it runs on the privileged system call stack. */
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetPeriodImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xTimerGetExpiryTime. Privileged callers
+ * (CONTROL.nPRIV == 0) branch straight to the Impl function; unprivileged
+ * callers bracket the Impl call with SVC portSVC_SYSTEM_CALL_ENTER /
+ * portSVC_SYSTEM_CALL_EXIT so it runs on the privileged system call stack. */
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetExpiryTimeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xEventGroupWaitBits. Privileged callers branch
+ * straight to the Impl function; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER_1 (not ENTER) because this call takes five
+ * parameters - the fifth is passed on the task stack and must be copied
+ * onto the privileged system call stack by vSystemCallEnter_1. */
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupWaitBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xEventGroupClearBits. Privileged callers
+ * (CONTROL.nPRIV == 0) branch straight to the Impl function; unprivileged
+ * callers bracket the Impl call with SVC portSVC_SYSTEM_CALL_ENTER /
+ * portSVC_SYSTEM_CALL_EXIT so it runs on the privileged system call stack. */
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupClearBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xEventGroupSetBits. Privileged callers
+ * (CONTROL.nPRIV == 0) branch straight to the Impl function; unprivileged
+ * callers bracket the Impl call with SVC portSVC_SYSTEM_CALL_ENTER /
+ * portSVC_SYSTEM_CALL_EXIT so it runs on the privileged system call stack. */
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSetBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xEventGroupSync. All four parameters fit in
+ * r0-r3, so the plain portSVC_SYSTEM_CALL_ENTER path is used. Privileged
+ * callers branch straight to the Impl function; unprivileged callers run
+ * the Impl on the privileged system call stack via the SVC pair. */
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSyncImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for uxEventGroupGetNumber (trace facility only).
+ * Privileged callers (CONTROL.nPRIV == 0) branch straight to the Impl
+ * function; unprivileged callers bracket the Impl call with SVC
+ * portSVC_SYSTEM_CALL_ENTER / portSVC_SYSTEM_CALL_EXIT. */
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_uxEventGroupGetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for vEventGroupSetNumber (trace facility only).
+ * Privileged callers (CONTROL.nPRIV == 0) branch straight to the Impl
+ * function; unprivileged callers bracket the Impl call with SVC
+ * portSVC_SYSTEM_CALL_ENTER / portSVC_SYSTEM_CALL_EXIT. */
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_vEventGroupSetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xStreamBufferSend. All four parameters fit in
+ * r0-r3. Privileged callers (CONTROL.nPRIV == 0) branch straight to the
+ * Impl function; unprivileged callers bracket the Impl call with SVC
+ * portSVC_SYSTEM_CALL_ENTER / portSVC_SYSTEM_CALL_EXIT. */
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xStreamBufferReceive. All four parameters fit in
+ * r0-r3. Privileged callers (CONTROL.nPRIV == 0) branch straight to the
+ * Impl function; unprivileged callers bracket the Impl call with SVC
+ * portSVC_SYSTEM_CALL_ENTER / portSVC_SYSTEM_CALL_EXIT. */
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xStreamBufferIsFull. Privileged callers
+ * (CONTROL.nPRIV == 0) branch straight to the Impl function; unprivileged
+ * callers bracket the Impl call with SVC portSVC_SYSTEM_CALL_ENTER /
+ * portSVC_SYSTEM_CALL_EXIT so it runs on the privileged system call stack. */
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xStreamBufferIsEmpty. Privileged callers
+ * (CONTROL.nPRIV == 0) branch straight to the Impl function; unprivileged
+ * callers bracket the Impl call with SVC portSVC_SYSTEM_CALL_ENTER /
+ * portSVC_SYSTEM_CALL_EXIT so it runs on the privileged system call stack. */
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xStreamBufferSpacesAvailable. Privileged callers
+ * (CONTROL.nPRIV == 0) branch straight to the Impl function; unprivileged
+ * callers bracket the Impl call with SVC portSVC_SYSTEM_CALL_ENTER /
+ * portSVC_SYSTEM_CALL_EXIT so it runs on the privileged system call stack. */
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xStreamBufferBytesAvailable. Privileged callers
+ * (CONTROL.nPRIV == 0) branch straight to the Impl function; unprivileged
+ * callers bracket the Impl call with SVC portSVC_SYSTEM_CALL_ENTER /
+ * portSVC_SYSTEM_CALL_EXIT so it runs on the privileged system call stack. */
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xStreamBufferSetTriggerLevel. Privileged callers
+ * (CONTROL.nPRIV == 0) branch straight to the Impl function; unprivileged
+ * callers bracket the Impl call with SVC portSVC_SYSTEM_CALL_ENTER /
+ * portSVC_SYSTEM_CALL_EXIT so it runs on the privileged system call stack. */
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked SVC shim for xStreamBufferNextMessageLengthBytes. Privileged
+ * callers (CONTROL.nPRIV == 0) branch straight to the Impl function;
+ * unprivileged callers bracket the Impl call with SVC
+ * portSVC_SYSTEM_CALL_ENTER / portSVC_SYSTEM_CALL_EXIT. */
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0, r1} \n"
+ " mrs r0, control \n" /* CONTROL.nPRIV (bit 0) gives the caller's privilege. */
+ " movs r1, #1 \n"
+ " tst r0, r1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0, r1} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0, r1} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c
+++ b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ /* Map the AP (Access Permissions) field of an MPU RBAR value to the
+ * task-level tskMPU_* permission bits used by the memory bounds checks.
+ * Returns 0 (no unprivileged access) when the region is neither
+ * unprivileged-read-only nor unprivileged-read-write. */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* Entry path for system calls with up to four register parameters.
+ * Verifies the SVC came from the system-call flash section, copies the
+ * exception stack frame from the task stack to the task's privileged
+ * system call stack, records the state vSystemCallExit needs to undo the
+ * switch, and clears CONTROL.nPRIV so the Impl function runs privileged.
+ * Runs inside the SVC handler; pulTaskStack is the PSP at SVC time and
+ * ulLR is the handler's EXC_RETURN value. */
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* Entry path for system calls with five parameters: identical to
+ * vSystemCallEnter except that the fifth argument, passed on the task
+ * stack, is also copied onto the system call stack. Two extra words are
+ * reserved (only one is used) to keep the system call stack double word
+ * aligned. Runs inside the SVC handler; pulTaskStack is the PSP at SVC
+ * time and ulLR is the handler's EXC_RETURN value. */
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. When the hardware
+ * inserted an alignment-padding word, the parameter sits one word
+ * further up the task stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+        /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c
index 7fb7b5a..b11b6e9 100644
--- a/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c
+++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c
@@ -44,6 +44,106 @@
#error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
#endif
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */
+ " movs r3, #5 \n" /* r3 = 5. */
+ " str r3, [r1] \n" /* Program RNR = 5. */
+ " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */
+ " movs r3, #6 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 6. */
+ " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */
+        " movs r3, #7 \n" /* r3 = 7. */
+ " str r3, [r1] \n" /* Program RNR = 7. */
+ " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " subs r1, #16 \n"
+ " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ " subs r1, #16 \n"
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " mov lr, r5 \n"
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " subs r1, #32 \n"
+ " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */
+        " stmia r2!, {r4-r7} \n" /* Copy half of the hardware saved context on the task stack. */
+ " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */
+        " stmia r2!, {r4-r7} \n" /* Copy the remaining half of the hardware saved context on the task stack. */
+ " subs r1, #48 \n"
+ " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " subs r1, #32 \n"
+ " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */
+ " subs r1, #16 \n"
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -54,78 +154,21 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r4, #1 \n"/* r4 = 1. */
- " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r2] \n"/* Program RNR = 4. */
- " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */
- " movs r4, #5 \n"/* r4 = 5. */
- " str r4, [r2] \n"/* Program RNR = 5. */
- " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */
- " movs r4, #6 \n"/* r4 = 6. */
- " str r4, [r2] \n"/* Program RNR = 6. */
- " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */
- " movs r4, #7 \n"/* r4 = 7. */
- " str r4, [r2] \n"/* Program RNR = 7. */
- " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r4, #1 \n"/* r4 = 1. */
- " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " msr control, r2 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " bx r2 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
+ " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
+ " movs r1, #2 \n"/* r1 = 2. */
+ " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n"/* Discard everything up to r0. */
+ " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " bx r2 \n"/* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -232,6 +275,136 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " \n"
+ " save_general_regs: \n"
+ " stmia r1!, {r4-r7} \n" /* Store r4-r7. */
+ " mov r4, r8 \n" /* r4 = r8. */
+ " mov r5, r9 \n" /* r5 = r9. */
+ " mov r6, r10 \n" /* r6 = r10. */
+ " mov r7, r11 \n" /* r7 = r11. */
+ " stmia r1!, {r4-r7} \n" /* Store r8-r11. */
+ " ldmia r2!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */
+ " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */
+ " ldmia r2!, {r4-r7} \n" /* Copy rest half of the hardware saved context into r4-r7. */
+ " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " mrs r3, psplim \n" /* r3 = PSPLIM. */
+ " mrs r4, control \n" /* r4 = CONTROL. */
+ " mov r5, lr \n" /* r5 = LR. */
+ " stmia r1!, {r2-r5} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " cpsid i \n"
+ " bl vTaskSwitchContext \n"
+ " cpsie i \n"
+ " \n"
+ " program_mpu: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */
+ " movs r3, #5 \n" /* r3 = 5. */
+ " str r3, [r1] \n" /* Program RNR = 5. */
+ " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */
+ " movs r3, #6 \n" /* r3 = 6. */
+ " str r3, [r1] \n" /* Program RNR = 6. */
+ " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */
+        " movs r3, #7 \n" /* r3 = 7. */
+ " str r3, [r1] \n" /* Program RNR = 7. */
+ " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " movs r3, #1 \n" /* r3 = 1. */
+ " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " subs r1, #16 \n"
+ " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ " subs r1, #16 \n"
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " mov lr, r5 \n"
+ " \n"
+ " restore_general_regs: \n"
+ " subs r1, #32 \n"
+ " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */
+        " stmia r2!, {r4-r7} \n" /* Copy half of the hardware saved context on the task stack. */
+ " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */
+        " stmia r2!, {r4-r7} \n" /* Copy the remaining half of the hardware saved context on the task stack. */
+ " subs r1, #48 \n"
+ " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */
+ " mov r8, r4 \n" /* r8 = r4. */
+ " mov r9, r5 \n" /* r9 = r5. */
+ " mov r10, r6 \n" /* r10 = r6. */
+ " mov r11, r7 \n" /* r11 = r7. */
+ " subs r1, #32 \n"
+ " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */
+ " subs r1, #16 \n"
+ " \n"
+ " restore_context_done: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -241,30 +414,16 @@
" mrs r0, psp \n"/* Read PSP in r0. */
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
- #if ( configENABLE_MPU == 1 )
- " subs r0, r0, #44 \n"/* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- " str r0, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r2, control \n"/* r2 = CONTROL. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r0!, {r1-r7} \n"/* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */
- " mov r4, r8 \n"/* r4 = r8. */
- " mov r5, r9 \n"/* r5 = r9. */
- " mov r6, r10 \n"/* r6 = r10. */
- " mov r7, r11 \n"/* r7 = r11. */
- " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
- #else /* configENABLE_MPU */
- " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */
- " str r0, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r2, psplim \n"/* r2 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
- " mov r4, r8 \n"/* r4 = r8. */
- " mov r5, r9 \n"/* r5 = r9. */
- " mov r6, r10 \n"/* r6 = r10. */
- " mov r7, r11 \n"/* r7 = r11. */
- " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
- #endif /* configENABLE_MPU */
+ " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */
+ " str r0, [r1] \n"/* Save the new top of stack in TCB. */
+ " mrs r2, psplim \n"/* r2 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
+ " mov r4, r8 \n"/* r4 = r8. */
+ " mov r5, r9 \n"/* r5 = r9. */
+ " mov r6, r10 \n"/* r6 = r10. */
+ " mov r7, r11 \n"/* r7 = r11. */
+ " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
" \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
@@ -274,88 +433,76 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r4, #1 \n"/* r4 = 1. */
- " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r2] \n"/* Program RNR = 4. */
- " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */
- " movs r4, #5 \n"/* r4 = 5. */
- " str r4, [r2] \n"/* Program RNR = 5. */
- " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */
- " movs r4, #6 \n"/* r4 = 6. */
- " str r4, [r2] \n"/* Program RNR = 6. */
- " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */
- " movs r4, #7 \n"/* r4 = 7. */
- " str r4, [r2] \n"/* Program RNR = 7. */
- " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */
- " \n"
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " movs r4, #1 \n"/* r4 = 1. */
- " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " adds r0, r0, #28 \n"/* Move to the high registers. */
- " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
- " mov r8, r4 \n"/* r8 = r4. */
- " mov r9, r5 \n"/* r9 = r5. */
- " mov r10, r6 \n"/* r10 = r6. */
- " mov r11, r7 \n"/* r11 = r7. */
- " msr psp, r0 \n"/* Remember the new top of stack for the task. */
- " subs r0, r0, #44 \n"/* Move to the starting of the saved context. */
- " ldmia r0!, {r1-r7} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r2 \n"/* Restore the CONTROL register value for the task. */
- " bx r3 \n"
- #else /* configENABLE_MPU */
- " adds r0, r0, #24 \n"/* Move to the high registers. */
- " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
- " mov r8, r4 \n"/* r8 = r4. */
- " mov r9, r5 \n"/* r9 = r5. */
- " mov r10, r6 \n"/* r10 = r6. */
- " mov r11, r7 \n"/* r11 = r7. */
- " msr psp, r0 \n"/* Remember the new top of stack for the task. */
- " subs r0, r0, #40 \n"/* Move to the starting of the saved context. */
- " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
- " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
- " bx r3 \n"
- #endif /* configENABLE_MPU */
+ " adds r0, r0, #24 \n"/* Move to the high registers. */
+ " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
+ " mov r8, r4 \n"/* r8 = r4. */
+ " mov r9, r5 \n"/* r9 = r5. */
+ " mov r10, r6 \n"/* r10 = r6. */
+ " mov r11, r7 \n"/* r11 = r7. */
+ " msr psp, r0 \n"/* Remember the new top of stack for the task. */
+ " subs r0, r0, #40 \n"/* Move to the starting of the saved context. */
+ " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
+ " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
+ " bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "movs r0, #4 \n"
+ "mov r1, lr \n"
+ "tst r0, r1 \n"
+ "beq stack_on_msp \n"
+ "stack_on_psp: \n"
+ " mrs r0, psp \n"
+ " b route_svc \n"
+ "stack_on_msp: \n"
+ " mrs r0, msp \n"
+ " b route_svc \n"
+ " \n"
+ "route_svc: \n"
+ " ldr r2, [r0, #24] \n"
+ " subs r2, #2 \n"
+ " ldrb r3, [r2, #0] \n"
+ " cmp r3, %0 \n"
+ " beq system_call_enter \n"
+ " cmp r3, %1 \n"
+ " beq system_call_enter_1 \n"
+ " cmp r3, %2 \n"
+ " beq system_call_exit \n"
+ " b vPortSVCHandler_C \n"
+ " \n"
+ "system_call_enter: \n"
+ " b vSystemCallEnter \n"
+ "system_call_enter_1: \n"
+ " b vSystemCallEnter_1 \n"
+ "system_call_exit: \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "r3", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -378,4 +525,6 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h
+++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..6e20434
--- /dev/null
+++ b/portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2349 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+/*
+ * Naked system-call trampoline: reads the CONTROL register and tests bit 0
+ * (nPRIV). A privileged caller tail-branches ("b") directly to the
+ * corresponding MPU_...Impl routine; an unprivileged caller raises SVC
+ * portSVC_SYSTEM_CALL_ENTER to enter the privileged system-call context,
+ * calls the Impl, then raises SVC portSVC_SYSTEM_CALL_EXIT before returning.
+ * r0 is pushed/popped around the CONTROL read so the first function argument
+ * reaches the Impl intact. The same pattern repeats for each wrapper below.
+ */
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+/*
+ * Naked system-call trampoline: reads CONTROL bit 0 (nPRIV). A privileged
+ * caller tail-branches directly to the MPU_...Impl routine; an unprivileged
+ * caller enters via SVC portSVC_SYSTEM_CALL_ENTER, calls the Impl, then
+ * exits via SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the
+ * CONTROL read so the first argument is preserved. The same pattern repeats
+ * for each wrapper below.
+ */
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Same privilege-dispatch trampoline pattern as above. */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Same privilege-dispatch trampoline pattern as above. */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/*
+ * Naked system-call trampolines for the task-notification API: CONTROL bit 0
+ * (nPRIV) selects the path — privileged callers tail-branch to the Impl,
+ * unprivileged callers enter/exit the privileged system-call context via the
+ * SVC pair. r0 is preserved around the CONTROL read. Note the two
+ * five-argument wrappers (xTaskGenericNotify, xTaskGenericNotifyWait) enter
+ * via portSVC_SYSTEM_CALL_ENTER_1 — presumably the SVC variant for calls
+ * whose fifth argument is passed on the stack (see the port layer).
+ */
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Five-argument wrapper: enters via portSVC_SYSTEM_CALL_ENTER_1. */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Same privilege-dispatch trampoline pattern (standard ENTER SVC). */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Same privilege-dispatch trampoline pattern (standard ENTER SVC). */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Same privilege-dispatch trampoline pattern (standard ENTER SVC). */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/*
+ * Naked system-call trampolines for the queue API: CONTROL bit 0 (nPRIV)
+ * selects the path — a privileged caller tail-branches directly to the
+ * MPU_...Impl routine; an unprivileged caller enters via SVC
+ * portSVC_SYSTEM_CALL_ENTER, calls the Impl, then exits via SVC
+ * portSVC_SYSTEM_CALL_EXIT. r0 is pushed/popped around the CONTROL read so
+ * the first argument is preserved. Same pattern for each wrapper below.
+ */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Same privilege-dispatch trampoline pattern as above. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Same privilege-dispatch trampoline pattern as above. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Same privilege-dispatch trampoline pattern as above. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Same privilege-dispatch trampoline pattern as above. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Same privilege-dispatch trampoline pattern as above. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+/*
+ * Naked system-call trampolines for the mutex, queue-set and queue-registry
+ * APIs: CONTROL bit 0 (nPRIV) selects the path — privileged callers
+ * tail-branch directly to the MPU_...Impl routine; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, then exit via SVC
+ * portSVC_SYSTEM_CALL_EXIT. r0 is pushed/popped around the CONTROL read so
+ * the first argument is preserved. Same pattern for each wrapper below.
+ */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSelectFromSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueAddToSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueAddToRegistryImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueUnregisterQueueImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcQueueGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/*
+ * Naked system-call trampolines for the software-timer API: CONTROL bit 0
+ * (nPRIV) selects the path — privileged callers tail-branch directly to the
+ * MPU_...Impl routine; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, call the Impl, then exit via SVC
+ * portSVC_SYSTEM_CALL_EXIT. r0 is pushed/popped around the CONTROL read so
+ * the first argument is preserved. Same pattern for each wrapper below.
+ */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTimerGetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerIsTimerActiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Same privilege-dispatch trampoline pattern as above. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/*
+ * Naked trampoline for xTimerGenericCommand. Unlike the other wrappers this
+ * one first reads IPSR: a non-zero IPSR means execution is inside an
+ * exception handler, so the call takes the privileged path regardless of
+ * CONTROL. Otherwise CONTROL bit 0 (nPRIV) selects the path as usual. The
+ * unprivileged path enters via portSVC_SYSTEM_CALL_ENTER_1 — presumably the
+ * SVC variant for system calls whose fifth argument is passed on the stack
+ * (see the port layer). r0 is preserved around the register reads.
+ */
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGenericCommandImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTimerGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetPeriodImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetExpiryTimeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupWaitBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupClearBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSetBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSyncImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxEventGroupGetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vEventGroupSetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/GCC/ARM_CM33/non_secure/port.c b/portable/GCC/ARM_CM33/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/GCC/ARM_CM33/non_secure/port.c
+++ b/portable/GCC/ARM_CM33/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/GCC/ARM_CM33/non_secure/portasm.c b/portable/GCC/ARM_CM33/non_secure/portasm.c
index 9f9b2e6..f7ec7d9 100644
--- a/portable/GCC/ARM_CM33/non_secure/portasm.c
+++ b/portable/GCC/ARM_CM33/non_secure/portasm.c
@@ -40,95 +40,120 @@
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xSecureContextConst2: .word xSecureContext \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
- " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r3, [r2] \n"/* Read pxCurrentTCB. */
- " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r3, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r2] \n"/* Program RNR = 4. */
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #8 \n"/* r4 = 8. */
- " str r4, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #12 \n"/* r4 = 12. */
- " str r4, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- " ldr r5, xSecureContextConst2 \n"
- " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " msr control, r3 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r4 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
- " ldr r4, xSecureContextConst2 \n"
- " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
- " \n"
+ " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ " ldr r4, xSecureContextConst2 \n"
+ " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */
+ " msr psplim, r2 \n" /* Set this task's PSPLIM value. */
+ " movs r1, #2 \n" /* r1 = 2. */
+ " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n" /* Discard everything up to r0. */
+ " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx r3 \n" /* Finally, branch to EXC_RETURN. */
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -236,6 +261,160 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " save_s_context: \n"
+ " push {r0-r2, lr} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r2, lr} \n"
+ " \n"
+ " save_ns_context: \n"
+ " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */
+ " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " save_general_regs: \n"
+ " mrs r3, psp \n"
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " stmia r2!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psp \n" /* r3 = PSP. */
+ " mrs r4, psplim \n" /* r4 = PSPLIM. */
+ " mrs r5, control \n" /* r5 = CONTROL. */
+ " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* No secure context to restore. */
+ " \n"
+ " restore_s_context: \n"
+ " push {r1-r3, lr} \n"
+ " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r1-r3, lr} \n"
+ " \n"
+ " restore_ns_context: \n"
+ " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */
+ " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " restore_context_done: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xSecureContextConst: .word xSecureContext \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -260,20 +439,11 @@
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB.*/
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
@@ -284,26 +454,14 @@
" it eq \n"
" vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #16 \n"/* r2 = r2 + 16. */
- " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " subs r2, r2, #16 \n"/* r2 = r2 - 16. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #12 \n"/* r2 = r2 + 12. */
- " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " subs r2, r2, #12 \n"/* r2 = r2 - 12. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " adds r2, r2, #12 \n"/* r2 = r2 + 12. */
+ " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " subs r2, r2, #12 \n"/* r2 = r2 - 12. */
+ " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" \n"
" select_next_task: \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
@@ -318,83 +476,22 @@
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r3] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r3] \n"/* Program MAIR0. */
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r3] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #8 \n"/* r4 = 8. */
- " str r4, [r3] \n"/* Program RNR = 8. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #12 \n"/* r4 = 12. */
- " str r4, [r3] \n"/* Program RNR = 12. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r3] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r3 \n"/* Restore the CONTROL register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #else /* configENABLE_MPU */
- " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #endif /* configENABLE_MPU */
+ " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
+ " mov lr, r4 \n"/* LR = r4. */
+ " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r3] \n"/* Restore the task's xSecureContext. */
+ " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
+ " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
+ " push {r2, r4} \n"
+ " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r2, r4} \n"
+ " mov lr, r4 \n"/* LR = r4. */
+ " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " msr psp, r2 \n"/* Remember the new top of stack for the task. */
+ " bx lr \n"
" \n"
" restore_ns_context: \n"
" ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */
@@ -409,17 +506,60 @@
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xSecureContextConst: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "beq syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_enter_1 \n"
+ "cmp r2, %2 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_enter_1: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter_1 \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -437,6 +577,8 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
diff --git a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h
+++ b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..6e20434
--- /dev/null
+++ b/portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2349 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+/* MPU wrapper for vTaskGetInfo (naked): privileged callers (CONTROL.nPRIV
+ * == 0) tail-call the implementation; unprivileged callers enter the kernel
+ * via SVC (system-call enter/exit) so the call runs with privilege. */
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+/* MPU wrapper for xTaskGetIdleTaskHandle (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+/* MPU wrapper for vTaskSuspend (naked): privileged callers (CONTROL.nPRIV
+ * == 0) tail-call the implementation; unprivileged callers enter the kernel
+ * via SVC (system-call enter/exit). */
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+/* MPU wrapper for vTaskResume (naked): privileged callers (CONTROL.nPRIV
+ * == 0) tail-call the implementation; unprivileged callers enter the kernel
+ * via SVC (system-call enter/exit). */
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xTaskGetTickCount (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for uxTaskGetNumberOfTasks (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for pcTaskGetName (naked): privileged callers (CONTROL.nPRIV
+ * == 0) tail-call the implementation; unprivileged callers enter the kernel
+ * via SVC (system-call enter/exit). */
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+/* MPU wrapper for ulTaskGetRunTimeCounter (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+/* MPU wrapper for ulTaskGetRunTimePercent (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+/* MPU wrapper for ulTaskGetIdleRunTimePercent (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+/* MPU wrapper for ulTaskGetIdleRunTimeCounter (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+/* MPU wrapper for vTaskSetApplicationTaskTag (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+/* MPU wrapper for xTaskGetApplicationTaskTag (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+/* MPU wrapper for vTaskSetThreadLocalStoragePointer (naked): privileged
+ * callers (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged
+ * callers enter the kernel via SVC (system-call enter/exit). */
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+/* MPU wrapper for pvTaskGetThreadLocalStoragePointer (naked): privileged
+ * callers (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged
+ * callers enter the kernel via SVC (system-call enter/exit). */
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+/* MPU wrapper for uxTaskGetSystemState (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+/* MPU wrapper for uxTaskGetStackHighWaterMark (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+/* MPU wrapper for uxTaskGetStackHighWaterMark2 (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+/* MPU wrapper for xTaskGetCurrentTaskHandle (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+/* MPU wrapper for xTaskGetSchedulerState (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for vTaskSetTimeOutState (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xTaskCheckForTimeOut (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for xTaskGenericNotify (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit).
+ * NOTE(review): this one uses portSVC_SYSTEM_CALL_ENTER_1 rather than
+ * portSVC_SYSTEM_CALL_ENTER — presumably because the API takes a fifth
+ * argument passed on the stack; confirm against the port's SVC handler. */
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for xTaskGenericNotifyWait (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit).
+ * NOTE(review): this one uses portSVC_SYSTEM_CALL_ENTER_1 rather than
+ * portSVC_SYSTEM_CALL_ENTER — presumably because the API takes a fifth
+ * argument passed on the stack; confirm against the port's SVC handler. */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for ulTaskGenericNotifyTake (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for xTaskGenericNotifyStateClear (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for ulTaskGenericNotifyValueClear (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueueGenericSend (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for uxQueueMessagesWaiting (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for uxQueueSpacesAvailable (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueueReceive (naked): privileged callers (CONTROL.nPRIV
+ * == 0) tail-call the implementation; unprivileged callers enter the kernel
+ * via SVC (system-call enter/exit). */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueuePeek (naked): privileged callers (CONTROL.nPRIV
+ * == 0) tail-call the implementation; unprivileged callers enter the kernel
+ * via SVC (system-call enter/exit). */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueueSemaphoreTake (naked): privileged callers
+ * (CONTROL.nPRIV == 0) tail-call the implementation; unprivileged callers
+ * enter the kernel via SVC (system-call enter/exit). */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSelectFromSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueAddToSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueAddToRegistryImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueUnregisterQueueImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcQueueGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTimerGetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerIsTimerActiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGenericCommandImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTimerGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetPeriodImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetExpiryTimeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupWaitBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupClearBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSetBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSyncImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxEventGroupGetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vEventGroupSetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c
+++ b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c
index a78529d..504b6bf 100644
--- a/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c
+++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c
@@ -40,6 +40,88 @@
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n" /* Restore the task's PSP. */
+ " msr psplim, r3 \n" /* Restore the task's PSPLIM. */
+ " msr control, r4 \n" /* Restore the task's CONTROL register. */
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context onto the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n" /* Branch to LR (EXC_RETURN) to start the first task. */
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -50,80 +132,23 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r3, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #4 \n"/* r3 = 4. */
- " str r3, [r2] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #8 \n"/* r3 = 8. */
- " str r3, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #12 \n"/* r3 = 12. */
- " str r3, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " msr control, r2 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r2 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
+ " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
+ " movs r1, #2 \n"/* r1 = 2. */
+ " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n"/* Discard everything up to r0. */
+ " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
+ " bx r2 \n"/* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -231,6 +256,129 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " \n"
+ " save_general_regs: \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */
+ " tst lr, #0x10 \n" /* Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " ittt eq \n"
+ " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " stmia r1!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psplim \n" /* r3 = PSPLIM. */
+ " mrs r4, control \n" /* r4 = CONTROL. */
+ " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " msr basepri, r0 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n" /* Restore the task's PSP. */
+ " msr psplim, r3 \n" /* Restore the task's PSPLIM. */
+ " msr control, r4 \n" /* Restore the task's CONTROL register. */
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context onto the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n" /* Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
+ " ittt eq \n"
+ " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context onto the task stack. */
+ " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " restore_context_done: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n" /* Exception return - resume the selected task. */
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -238,21 +386,16 @@
" .syntax unified \n"
" \n"
" mrs r0, psp \n"/* Read PSP in r0. */
+ " \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r2, control \n"/* r2 = CONTROL. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
- #else /* configENABLE_MPU */
- " mrs r2, psplim \n"/* r2 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
- #endif /* configENABLE_MPU */
+ " \n"
+ " mrs r2, psplim \n"/* r2 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
@@ -270,52 +413,7 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */
- " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r3, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #4 \n"/* r3 = 4. */
- " str r3, [r2] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #8 \n"/* r3 = 8. */
- " str r3, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #12 \n"/* r3 = 12. */
- " str r3, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
- #else /* configENABLE_MPU */
- " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
- #endif /* configENABLE_MPU */
+ " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
@@ -323,28 +421,66 @@
" vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
- #if ( configENABLE_MPU == 1 )
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r2 \n"/* Restore the CONTROL register value for the task. */
- #else /* configENABLE_MPU */
- " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
- #endif /* configENABLE_MPU */
+ " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n" /* Bit[2] of EXC_RETURN is 0 if the caller was using MSP, 1 if it was using PSP. */
+ "ite eq \n"
+ "mrseq r0, msp \n" /* r0 = MSP - the exception frame is on the main stack. */
+ "mrsne r0, psp \n" /* r0 = PSP - the exception frame is on the process stack. */
+ " \n"
+ "ldr r1, [r0, #24] \n" /* r1 = stacked PC i.e. the return address in the exception frame. */
+ "ldrb r2, [r1, #-2] \n" /* r2 = SVC number, read from the SVC instruction 2 bytes before the return address. */
+ "cmp r2, %0 \n" /* Is it portSVC_SYSTEM_CALL_ENTER? */
+ "beq syscall_enter \n"
+ "cmp r2, %1 \n" /* Is it portSVC_SYSTEM_CALL_ENTER_1? */
+ "beq syscall_enter_1 \n"
+ "cmp r2, %2 \n" /* Is it portSVC_SYSTEM_CALL_EXIT? */
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n" /* Any other SVC number is handled in C. */
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n" /* r1 = EXC_RETURN. */
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_enter_1: \n"
+ " mov r1, lr \n" /* r1 = EXC_RETURN. */
+ " b vSystemCallEnter_1 \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n" /* r1 = EXC_RETURN. */
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -362,4 +498,6 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h
+++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; /**< Buffer used as the privileged-only stack while a system call executes. */
+ uint32_t * pulSystemCallStack; /**< Top of the system call stack. */
+ uint32_t * pulSystemCallStackLimit; /**< Limit of the system call stack. */
+ uint32_t * pulTaskStack; /**< Task's stack pointer, preserved across the system call. */
+ uint32_t ulLinkRegisterAtSystemCallEntry; /**< LR (EXC_RETURN) value at system call entry. */
+ uint32_t ulStackLimitRegisterAtSystemCallEntry; /**< Stack limit register value at system call entry. */
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ]; /**< Task's context, saved here when the task is swapped out. */
+ uint32_t ulTaskFlags; /**< Task flags - see portSTACK_FRAME_HAS_PADDING_FLAG and portTASK_IS_PRIVILEGED_FLAG. */
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; /**< Privileged stack used while executing system calls. */
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/GCC/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..6e20434
--- /dev/null
+++ b/portable/GCC/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2349 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Naked MPU wrapper for xTaskGenericNotifyStateClear: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Naked MPU wrapper for ulTaskGenericNotifyValueClear: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xQueueGenericSend: privileged callers (CONTROL.nPRIV
+ * clear) branch straight to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via SVC
+ * portSVC_SYSTEM_CALL_EXIT. All four arguments fit in r0-r3; r0 is
+ * saved/restored around the CONTROL read. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for uxQueueMessagesWaiting: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for uxQueueSpacesAvailable: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xQueueReceive: privileged callers (CONTROL.nPRIV
+ * clear) branch straight to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via SVC
+ * portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xQueuePeek: privileged callers (CONTROL.nPRIV clear)
+ * branch straight to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via SVC
+ * portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xQueueSemaphoreTake: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+/* Naked MPU wrapper for xQueueGetMutexHolder: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* Naked MPU wrapper for xQueueTakeMutexRecursive: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* Naked MPU wrapper for xQueueGiveMutexRecursive: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* Naked MPU wrapper for xQueueSelectFromSet: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSelectFromSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* Naked MPU wrapper for xQueueAddToSet: privileged callers (CONTROL.nPRIV
+ * clear) branch straight to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via SVC
+ * portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueAddToSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* Naked MPU wrapper for vQueueAddToRegistry: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueAddToRegistryImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* Naked MPU wrapper for vQueueUnregisterQueue: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueUnregisterQueueImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* Naked MPU wrapper for pcQueueGetName: privileged callers (CONTROL.nPRIV
+ * clear) branch straight to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via SVC
+ * portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcQueueGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for pvTimerGetTimerID: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTimerGetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for vTimerSetTimerID: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for xTimerIsTimerActive: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerIsTimerActiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for xTimerGetTimerDaemonTaskHandle: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for xTimerGenericCommand. Unlike the other wrappers this
+ * one first reads IPSR: a non-zero IPSR (running in exception/ISR context)
+ * takes the privileged path and tail-branches straight to the Impl, as does a
+ * privileged thread-mode caller (CONTROL.nPRIV clear). Only an unprivileged
+ * thread-mode caller raises SVC to enter/exit the system call. The ENTER_1
+ * SVC variant is used — presumably because this 5-argument call passes its
+ * fifth argument on the stack; confirm against the SVC handler. */
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGenericCommandImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for pcTimerGetName: privileged callers (CONTROL.nPRIV
+ * clear) branch straight to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via SVC
+ * portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTimerGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for vTimerSetReloadMode: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for xTimerGetReloadMode: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for uxTimerGetReloadMode: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for xTimerGetPeriod: privileged callers (CONTROL.nPRIV
+ * clear) branch straight to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via SVC
+ * portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetPeriodImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Naked MPU wrapper for xTimerGetExpiryTime: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetExpiryTimeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xEventGroupWaitBits: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC, call the Impl, and exit via SVC portSVC_SYSTEM_CALL_EXIT.
+ * The ENTER_1 SVC variant is used — presumably because this 5-argument call
+ * passes its fifth argument on the stack; confirm against the SVC handler.
+ * r0 is saved/restored around the CONTROL read. */
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupWaitBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xEventGroupClearBits: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupClearBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xEventGroupSetBits: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL read. */
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSetBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xEventGroupSync: privileged callers (CONTROL.nPRIV
+ * clear) branch straight to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via SVC
+ * portSVC_SYSTEM_CALL_EXIT. All four arguments fit in r0-r3; r0 is
+ * saved/restored around the CONTROL read. */
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSyncImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+/* Naked MPU wrapper for uxEventGroupGetNumber (trace facility): privileged
+ * callers (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged
+ * callers enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit
+ * via SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL
+ * read. */
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxEventGroupGetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+/* Naked MPU wrapper for vEventGroupSetNumber (trace facility): privileged
+ * callers (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged
+ * callers enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit
+ * via SVC portSVC_SYSTEM_CALL_EXIT. r0 is saved/restored around the CONTROL
+ * read. */
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vEventGroupSetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xStreamBufferSend: privileged callers (CONTROL.nPRIV
+ * clear) branch straight to the Impl; unprivileged callers enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via SVC
+ * portSVC_SYSTEM_CALL_EXIT. All four arguments fit in r0-r3; r0 is
+ * saved/restored around the CONTROL read. */
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU wrapper for xStreamBufferReceive: privileged callers
+ * (CONTROL.nPRIV clear) branch straight to the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER, call the Impl, and exit via
+ * SVC portSVC_SYSTEM_CALL_EXIT. All four arguments fit in r0-r3; r0 is
+ * saved/restored around the CONTROL read. */
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/GCC/ARM_CM35P/non_secure/port.c b/portable/GCC/ARM_CM35P/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/GCC/ARM_CM35P/non_secure/port.c
+++ b/portable/GCC/ARM_CM35P/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions?
+ * Both operands are fully parenthesized so compound arguments such as
+ * ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ) expand correctly. */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+    ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+        /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+        /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+        /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+        /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/GCC/ARM_CM35P/non_secure/portasm.c b/portable/GCC/ARM_CM35P/non_secure/portasm.c
index 9f9b2e6..f7ec7d9 100644
--- a/portable/GCC/ARM_CM35P/non_secure/portasm.c
+++ b/portable/GCC/ARM_CM35P/non_secure/portasm.c
@@ -40,95 +40,120 @@
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xSecureContextConst2: .word xSecureContext \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
- " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r3, [r2] \n"/* Read pxCurrentTCB. */
- " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r3, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r2] \n"/* Program RNR = 4. */
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #8 \n"/* r4 = 8. */
- " str r4, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #12 \n"/* r4 = 12. */
- " str r4, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- " ldr r5, xSecureContextConst2 \n"
- " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " msr control, r3 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r4 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
- " ldr r4, xSecureContextConst2 \n"
- " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
- " \n"
+ " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ " ldr r4, xSecureContextConst2 \n"
+ " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */
+ " msr psplim, r2 \n" /* Set this task's PSPLIM value. */
+ " movs r1, #2 \n" /* r1 = 2. */
+ " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n" /* Discard everything up to r0. */
+ " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx r3 \n" /* Finally, branch to EXC_RETURN. */
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -236,6 +261,160 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " save_s_context: \n"
+ " push {r0-r2, lr} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r2, lr} \n"
+ " \n"
+ " save_ns_context: \n"
+ " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */
+ " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " save_general_regs: \n"
+ " mrs r3, psp \n"
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " stmia r2!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psp \n" /* r3 = PSP. */
+ " mrs r4, psplim \n" /* r4 = PSPLIM. */
+ " mrs r5, control \n" /* r5 = CONTROL. */
+ " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* No secure context to restore. */
+ " \n"
+ " restore_s_context: \n"
+ " push {r1-r3, lr} \n"
+ " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r1-r3, lr} \n"
+ " \n"
+ " restore_ns_context: \n"
+ " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */
+ " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " restore_context_done: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xSecureContextConst: .word xSecureContext \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -260,20 +439,11 @@
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB.*/
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
@@ -284,26 +454,14 @@
" it eq \n"
" vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #16 \n"/* r2 = r2 + 16. */
- " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " subs r2, r2, #16 \n"/* r2 = r2 - 16. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #12 \n"/* r2 = r2 + 12. */
- " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " subs r2, r2, #12 \n"/* r2 = r2 - 12. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " adds r2, r2, #12 \n"/* r2 = r2 + 12. */
+ " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " subs r2, r2, #12 \n"/* r2 = r2 - 12. */
+ " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" \n"
" select_next_task: \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
@@ -318,83 +476,22 @@
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r3] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r3] \n"/* Program MAIR0. */
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r3] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #8 \n"/* r4 = 8. */
- " str r4, [r3] \n"/* Program RNR = 8. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #12 \n"/* r4 = 12. */
- " str r4, [r3] \n"/* Program RNR = 12. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r3] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r3 \n"/* Restore the CONTROL register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #else /* configENABLE_MPU */
- " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #endif /* configENABLE_MPU */
+ " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
+ " mov lr, r4 \n"/* LR = r4. */
+ " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r3] \n"/* Restore the task's xSecureContext. */
+ " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
+ " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
+ " push {r2, r4} \n"
+ " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r2, r4} \n"
+ " mov lr, r4 \n"/* LR = r4. */
+ " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " msr psp, r2 \n"/* Remember the new top of stack for the task. */
+ " bx lr \n"
" \n"
" restore_ns_context: \n"
" ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */
@@ -409,17 +506,60 @@
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xSecureContextConst: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "beq syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_enter_1 \n"
+ "cmp r2, %2 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_enter_1: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter_1 \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -437,6 +577,8 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h
+++ b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..6e20434
--- /dev/null
+++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2349 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Naked MPU system-call wrappers.  Each wrapper saves r0 (the first argument)
+ * while it inspects the CONTROL register: if the nPRIV bit (bit 0) is clear
+ * the caller is already privileged and execution branches straight to the
+ * corresponding MPU_...Impl function.  Otherwise SVC #portSVC_SYSTEM_CALL_ENTER
+ * raises privilege (and, per this commit, switches to the separate privileged
+ * system-call stack), the Impl function is called, and
+ * SVC #portSVC_SYSTEM_CALL_EXIT drops privilege again before returning to the
+ * caller.  The naked attribute is applied on the separate prototype; the
+ * comment on each definition line merely mirrors it. */
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+/* Wrapper for uxTaskGetStackHighWaterMark. */
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+/* Wrapper for uxTaskGetStackHighWaterMark2. */
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+/* Wrapper for xTaskGetCurrentTaskHandle (also needed by the mutex code). */
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+/* Wrapper for xTaskGetSchedulerState. */
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Naked MPU system-call wrappers for the time-out helpers.  Pattern: save r0,
+ * test the CONTROL nPRIV bit (bit 0) — privileged callers branch straight to
+ * the Impl function; unprivileged callers enter the kernel with
+ * SVC #portSVC_SYSTEM_CALL_ENTER, call the Impl on the privileged system-call
+ * stack, and return via SVC #portSVC_SYSTEM_CALL_EXIT. */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Wrapper for xTaskCheckForTimeOut; both pointer arguments are bounds-checked
+ * by the Impl side (see commit notes on memory bounds checks). */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU system-call wrappers for the task-notification API.  Pattern:
+ * save r0, test CONTROL.nPRIV (bit 0) — privileged callers branch straight to
+ * the Impl function, unprivileged callers go SVC #portSVC_SYSTEM_CALL_ENTER /
+ * call Impl / SVC #portSVC_SYSTEM_CALL_EXIT.  The two five-argument calls in
+ * this group (GenericNotify, GenericNotifyWait) use
+ * portSVC_SYSTEM_CALL_ENTER_1 instead of portSVC_SYSTEM_CALL_ENTER —
+ * presumably because the fifth argument is passed on the task stack (AAPCS)
+ * and must be handled specially by the SVC entry; TODO(review): confirm
+ * against the SVC handler. */
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Five arguments — uses portSVC_SYSTEM_CALL_ENTER_1 (see note above). */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Wrapper for ulTaskGenericNotifyTake. */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Wrapper for xTaskGenericNotifyStateClear. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* Wrapper for ulTaskGenericNotifyValueClear. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Naked MPU system-call wrappers for the core queue operations.  Pattern:
+ * save r0, test CONTROL.nPRIV (bit 0) — privileged callers branch straight to
+ * the Impl function; unprivileged callers enter via
+ * SVC #portSVC_SYSTEM_CALL_ENTER, call the Impl on the privileged system-call
+ * stack, and return via SVC #portSVC_SYSTEM_CALL_EXIT.  Queue handles are
+ * opaque integers validated on the Impl side (see commit notes). */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Wrapper for uxQueueMessagesWaiting. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Wrapper for uxQueueSpacesAvailable. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Wrapper for xQueueReceive; pvBuffer is bounds-checked on the Impl side. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Wrapper for xQueuePeek. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Wrapper for xQueueSemaphoreTake. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* Naked MPU system-call wrappers for the mutex API.  Pattern: save r0, test
+ * CONTROL.nPRIV (bit 0) — privileged callers branch straight to the Impl
+ * function; unprivileged callers go SVC #portSVC_SYSTEM_CALL_ENTER / call
+ * Impl / SVC #portSVC_SYSTEM_CALL_EXIT. */
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* Wrapper for xQueueTakeMutexRecursive. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* Wrapper for xQueueGiveMutexRecursive. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Naked MPU system-call wrappers for queue sets.  Pattern: save r0, test
+ * CONTROL.nPRIV (bit 0) — privileged callers branch straight to the Impl
+ * function; unprivileged callers go SVC #portSVC_SYSTEM_CALL_ENTER / call
+ * Impl / SVC #portSVC_SYSTEM_CALL_EXIT.  (Set creation/removal wrappers are
+ * privileged-only per the commit notes and therefore not present here.) */
+#if ( configUSE_QUEUE_SETS == 1 )
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSelectFromSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* Wrapper for xQueueAddToSet. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueAddToSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Naked MPU system-call wrappers for the queue registry.  Pattern: save r0,
+ * test CONTROL.nPRIV (bit 0) — privileged callers branch straight to the Impl
+ * function; unprivileged callers go SVC #portSVC_SYSTEM_CALL_ENTER / call
+ * Impl / SVC #portSVC_SYSTEM_CALL_EXIT. */
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueAddToRegistryImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* Wrapper for vQueueUnregisterQueue. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueUnregisterQueueImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* Wrapper for pcQueueGetName. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcQueueGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+/* Naked MPU system-call wrappers for the software-timer API.  Pattern: save
+ * r0, test CONTROL.nPRIV (bit 0) — privileged callers branch straight to the
+ * Impl function; unprivileged callers go SVC #portSVC_SYSTEM_CALL_ENTER /
+ * call Impl / SVC #portSVC_SYSTEM_CALL_EXIT. */
+#if ( configUSE_TIMERS == 1 )
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTimerGetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Wrapper for vTimerSetTimerID. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Wrapper for xTimerIsTimerActive. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerIsTimerActiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Wrapper for xTimerGetTimerDaemonTaskHandle. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Wrapper for xTimerGenericCommand.  Unlike the other naked wrappers in this
+ * file, this call may be issued from an interrupt handler, so IPSR is checked
+ * first: a non-zero IPSR means handler mode, and execution branches directly
+ * to the privileged path (no SVC is raised from an ISR).  Thread-mode callers
+ * then branch on CONTROL.nPRIV (bit 0) as usual — privileged callers tail-call
+ * the Impl function; unprivileged callers enter via
+ * SVC #portSVC_SYSTEM_CALL_ENTER_1, call the Impl, and exit via
+ * SVC #portSVC_SYSTEM_CALL_EXIT.  The _1 entry variant matches this call's
+ * five arguments, the fifth of which is passed on the stack (AAPCS) —
+ * TODO(review): confirm the _1 handling against the SVC handler. */
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGenericCommandImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Naked MPU system-call wrappers for the remaining software-timer getters and
+ * setters.  Pattern: save r0, test CONTROL.nPRIV (bit 0) — privileged callers
+ * branch straight to the Impl function; unprivileged callers enter via
+ * SVC #portSVC_SYSTEM_CALL_ENTER, call the Impl on the privileged system-call
+ * stack, and return via SVC #portSVC_SYSTEM_CALL_EXIT. */
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTimerGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Wrapper for vTimerSetReloadMode. */
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Wrapper for xTimerGetReloadMode. */
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Wrapper for uxTimerGetReloadMode (UBaseType_t-returning variant). */
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Wrapper for xTimerGetPeriod. */
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetPeriodImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* Wrapper for xTimerGetExpiryTime. */
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetExpiryTimeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupWaitBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupClearBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSetBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSyncImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxEventGroupGetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vEventGroupSetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c
+++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+    ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                                 " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                                 " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+        /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                                 " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+/**
+ * @brief Checks whether the calling task is a privileged task.
+ *
+ * The result is derived from the portTASK_IS_PRIVILEGED_FLAG bit stored in
+ * the calling task's MPU settings (obtained via xTaskGetMPUSettings( NULL )),
+ * not from the CONTROL register, so it reflects the task's configured
+ * privilege level rather than the current execution privilege.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+/**
+ * @brief Seeds the initial context for a new task.
+ *
+ * Unlike the non-MPU variant, the context is NOT written to the task's stack.
+ * It is written into xMPUSettings->ulContext in the TCB, in the order the
+ * context-restore assembly reads it: r4-r11, then the hardware-saved frame
+ * (r0-r3, r12, LR, PC, xPSR), then (if TrustZone is enabled) xSecureContext,
+ * then PSP, PSPLIM, CONTROL and EXC_RETURN.
+ *
+ * @param pxTopOfStack Top of the task's stack (highest usable address).
+ * @param pxEndOfStack End (limit) of the task's stack; becomes the initial PSPLIM.
+ * @param pxCode The task entry function; becomes the initial PC.
+ * @param pvParameters The task parameter; becomes the initial r0.
+ * @param xRunPrivileged pdTRUE to start the task privileged; also recorded in
+ * ulTaskFlags so privilege can be queried later.
+ * @param xMPUSettings The task's MPU settings structure in its TCB, which
+ * receives the saved context and system call stack setup.
+ *
+ * @return Pointer to one-past the last context word written - i.e. the
+ * location where the context-save assembly should store next.
+ */
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ /* Software-saved (callee-saved) registers, r4-r11. */
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ /* Registers the hardware saves/restores on exception entry/exit. */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ /* PSP is set 8 words below the top to leave room for the 8-word
+ * hardware-saved frame (r0-r3, r12, LR, PC, xPSR) copied there on restore. */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* Align the stack limit upwards so it stays within the buffer. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ /**
+ * @brief Checks whether the calling task may access a buffer.
+ *
+ * A privileged task (portTASK_IS_PRIVILEGED_FLAG set in its MPU settings)
+ * is always granted access. For an unprivileged task, access is granted
+ * only if both the start and end address of the buffer fall inside one
+ * enabled MPU region of the task AND that region's access permissions
+ * (from its RBAR) authorize the requested access type.
+ *
+ * @param pvBuffer Start address of the buffer.
+ * @param ulBufferLength Length of the buffer in bytes.
+ * NOTE(review): a length of 0 makes ( ulBufferLength - 1UL )
+ * wrap - callers presumably never pass 0; confirm.
+ * @param ulAccessRequested Requested access type, checked via portIS_AUTHORIZED.
+ *
+ * @return pdTRUE if access is granted, pdFALSE otherwise (including when
+ * the address range computation would overflow).
+ */
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ /* Reject buffers whose end address would wrap past 2^32. */
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c
index a78529d..504b6bf 100644
--- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c
+++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c
@@ -40,6 +40,88 @@
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Starts the first task (MPU variant) - naked function, all work in asm.
+ *
+ * Two phases:
+ * 1. program_mpu_first_task: with the MPU disabled, programs MAIR0 and the
+ * per-task RBAR/RLAR region pairs from pxCurrentTCB (regions 4-7, and
+ * 8-15 when configTOTAL_MPU_REGIONS == 16) via the RNR/RBAR alias
+ * registers, then re-enables the MPU.
+ * 2. restore_context_first_task: restores the context saved in the TCB by
+ * pxPortInitialiseStack - special registers (PSP, PSPLIM, CONTROL, LR)
+ * first, then r4-r11 and the hardware frame which is copied onto the
+ * task stack - and writes the new context-save location back as the
+ * first TCB member before branching to EXC_RETURN in LR.
+ */
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -50,80 +132,23 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r3, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #4 \n"/* r3 = 4. */
- " str r3, [r2] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #8 \n"/* r3 = 8. */
- " str r3, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #12 \n"/* r3 = 12. */
- " str r3, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " msr control, r2 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r2 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
+ " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
+ " movs r1, #2 \n"/* r1 = 2. */
+ " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n"/* Discard everything up to r0. */
+ " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
+ " bx r2 \n"/* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -231,6 +256,129 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+/**
+ * @brief Context switch handler (MPU variant) - naked function, all work in asm.
+ *
+ * Saves the outgoing task's full context into its TCB (not on its stack):
+ * optional FP registers (only when bit 4 of EXC_RETURN indicates an extended
+ * frame), r4-r11, a copy of the hardware-saved frame read from PSP, and
+ * finally PSP, PSPLIM, CONTROL and EXC_RETURN. It then calls
+ * vTaskSwitchContext with interrupts masked at
+ * configMAX_SYSCALL_INTERRUPT_PRIORITY, reprograms the MPU (MAIR0 plus the
+ * per-task RBAR/RLAR pairs, regions 4-7 and optionally 8-15) from the new
+ * pxCurrentTCB, and restores the incoming task's context from its TCB in
+ * the reverse order, copying the hardware frame back onto the task stack.
+ * The first TCB member always tracks where the next context save should
+ * write.
+ */
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " \n"
+ " save_general_regs: \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " stmia r1!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psplim \n" /* r3 = PSPLIM. */
+ " mrs r4, control \n" /* r4 = CONTROL. */
+ " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " restore_context_done: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -238,21 +386,16 @@
" .syntax unified \n"
" \n"
" mrs r0, psp \n"/* Read PSP in r0. */
+ " \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r2, control \n"/* r2 = CONTROL. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
- #else /* configENABLE_MPU */
- " mrs r2, psplim \n"/* r2 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
- #endif /* configENABLE_MPU */
+ " \n"
+ " mrs r2, psplim \n"/* r2 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
@@ -270,52 +413,7 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */
- " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r3, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #4 \n"/* r3 = 4. */
- " str r3, [r2] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #8 \n"/* r3 = 8. */
- " str r3, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #12 \n"/* r3 = 12. */
- " str r3, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
- #else /* configENABLE_MPU */
- " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
- #endif /* configENABLE_MPU */
+ " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
@@ -323,28 +421,66 @@
" vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
- #if ( configENABLE_MPU == 1 )
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r2 \n"/* Restore the CONTROL register value for the task. */
- #else /* configENABLE_MPU */
- " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
- #endif /* configENABLE_MPU */
+ " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/**
+ * @brief SVC dispatcher for MPU wrappers v2 - naked function, all work in asm.
+ *
+ * Selects MSP or PSP based on bit 2 of EXC_RETURN, reads the stacked PC
+ * (offset 24 in the exception frame) and extracts the SVC number from the
+ * byte at PC - 2 (the immediate encoded in the SVC instruction). System
+ * call enter/exit SVC numbers are routed to vSystemCallEnter,
+ * vSystemCallEnter_1 or vSystemCallExit with the exception frame pointer in
+ * r0 and EXC_RETURN in r1; every other SVC number falls through to
+ * vPortSVCHandler_C.
+ */
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "beq syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_enter_1 \n"
+ "cmp r2, %2 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_enter_1: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter_1 \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -362,4 +498,6 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h
+++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..df9239a
--- /dev/null
+++ b/portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2349 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xTaskGetSchedulerStateImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_vTaskSetTimeOutStateImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+                                     TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xTaskCheckForTimeOutImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+                                     TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+                                   UBaseType_t uxIndexToNotify,
+                                   uint32_t ulValue,
+                                   eNotifyAction eAction,
+                                   uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xTaskGenericNotifyImpl; unprivileged callers raise SVC to enter the
+ * privileged system call, call the Impl, then raise SVC to exit. Uses
+ * portSVC_SYSTEM_CALL_ENTER_1 — consistent with the other 5-argument
+ * wrappers in this file (presumably because the fifth argument is passed on
+ * the stack — TODO confirm against the SVC handler). */
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+                                   UBaseType_t uxIndexToNotify,
+                                   uint32_t ulValue,
+                                   eNotifyAction eAction,
+                                   uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+                                       uint32_t ulBitsToClearOnEntry,
+                                       uint32_t ulBitsToClearOnExit,
+                                       uint32_t * pulNotificationValue,
+                                       TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xTaskGenericNotifyWaitImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. Uses
+ * portSVC_SYSTEM_CALL_ENTER_1 like the other 5-argument wrappers here. */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+                                       uint32_t ulBitsToClearOnEntry,
+                                       uint32_t ulBitsToClearOnExit,
+                                       uint32_t * pulNotificationValue,
+                                       TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+                                      BaseType_t xClearCountOnExit,
+                                      TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_ulTaskGenericNotifyTakeImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+                                      BaseType_t xClearCountOnExit,
+                                      TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+                                             UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xTaskGenericNotifyStateClearImpl; unprivileged callers raise SVC to
+ * enter the privileged system call, call the Impl, then raise SVC to exit. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+                                             UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+                                            UBaseType_t uxIndexToClear,
+                                            uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_ulTaskGenericNotifyValueClearImpl; unprivileged callers raise SVC to
+ * enter the privileged system call, call the Impl, then raise SVC to exit. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+                                            UBaseType_t uxIndexToClear,
+                                            uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+                                  const void * const pvItemToQueue,
+                                  TickType_t xTicksToWait,
+                                  const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xQueueGenericSendImpl; unprivileged callers raise SVC to enter the
+ * privileged system call, call the Impl, then raise SVC to exit. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+                                  const void * const pvItemToQueue,
+                                  TickType_t xTicksToWait,
+                                  const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_uxQueueMessagesWaitingImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_uxQueueSpacesAvailableImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+                              void * const pvBuffer,
+                              TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xQueueReceiveImpl; unprivileged callers raise SVC to enter the
+ * privileged system call, call the Impl, then raise SVC to exit. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+                              void * const pvBuffer,
+                              TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+                           void * const pvBuffer,
+                           TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xQueuePeekImpl; unprivileged callers raise SVC to enter the
+ * privileged system call, call the Impl, then raise SVC to exit. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+                           void * const pvBuffer,
+                           TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+                                    TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xQueueSemaphoreTakeImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+                                    TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xQueueGetMutexHolderImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+                                         TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xQueueTakeMutexRecursiveImpl; unprivileged callers raise SVC to
+ * enter the privileged system call, call the Impl, then raise SVC to exit. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+                                         TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xQueueGiveMutexRecursiveImpl; unprivileged callers raise SVC to
+ * enter the privileged system call, call the Impl, then raise SVC to exit. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xQueueSelectFromSetImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSelectFromSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                               QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xQueueAddToSetImpl; unprivileged callers raise SVC to enter the
+ * privileged system call, call the Impl, then raise SVC to exit. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                               QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueAddToSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                              const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_vQueueAddToRegistryImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                              const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueAddToRegistryImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_vQueueUnregisterQueueImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueUnregisterQueueImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_pcQueueGetNameImpl; unprivileged callers raise SVC to enter the
+ * privileged system call, call the Impl, then raise SVC to exit. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcQueueGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_pvTimerGetTimerIDImpl; unprivileged callers raise SVC to enter the
+ * privileged system call, call the Impl, then raise SVC to exit. */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTimerGetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                           void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_vTimerSetTimerIDImpl; unprivileged callers raise SVC to enter the
+ * privileged system call, call the Impl, then raise SVC to exit. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                           void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xTimerIsTimerActiveImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerIsTimerActiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xTimerGetTimerDaemonTaskHandleImpl; unprivileged callers raise SVC
+ * to enter the privileged system call, call the Impl, then raise SVC to
+ * exit. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                     const BaseType_t xCommandID,
+                                     const TickType_t xOptionalValue,
+                                     BaseType_t * const pxHigherPriorityTaskWoken,
+                                     const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. Unlike the other wrappers in this file, it first reads
+ * IPSR: a non-zero IPSR means we are running in an exception handler, which
+ * is always privileged, so it takes the direct-branch path. Otherwise the
+ * usual CONTROL bit 0 test selects the direct branch (privileged) or the
+ * SVC enter / call Impl / SVC exit sequence (unprivileged). Uses
+ * portSVC_SYSTEM_CALL_ENTER_1, consistent with the other 5-argument
+ * wrappers here (presumably because the fifth argument is on the stack —
+ * TODO confirm against the SVC handler). */
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                     const BaseType_t xCommandID,
+                                     const TickType_t xOptionalValue,
+                                     BaseType_t * const pxHigherPriorityTaskWoken,
+                                     const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGenericCommandImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_pcTimerGetNameImpl; unprivileged callers raise SVC to enter the
+ * privileged system call, call the Impl, then raise SVC to exit. */
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTimerGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                              const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_vTimerSetReloadModeImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                              const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xTimerGetReloadModeImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_uxTimerGetReloadModeImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xTimerGetPeriodImpl; unprivileged callers raise SVC to enter the
+ * privileged system call, call the Impl, then raise SVC to exit. */
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetPeriodImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xTimerGetExpiryTimeImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. */
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetExpiryTimeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     const BaseType_t xClearOnExit,
+                                     const BaseType_t xWaitForAllBits,
+                                     TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper: privileged callers (CONTROL bit 0 clear) branch straight
+ * to MPU_xEventGroupWaitBitsImpl; unprivileged callers raise SVC to enter
+ * the privileged system call, call the Impl, then raise SVC to exit. Uses
+ * portSVC_SYSTEM_CALL_ENTER_1 like the other 5-argument wrappers here. */
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     const BaseType_t xClearOnExit,
+                                     const BaseType_t xWaitForAllBits,
+                                     TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupWaitBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupClearBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSetBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSyncImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxEventGroupGetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vEventGroupSetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c
index 1a3f9473..d29b31d 100755
--- a/portable/GCC/ARM_CM3_MPU/port.c
+++ b/portable/GCC/ARM_CM3_MPU/port.c
@@ -90,6 +90,7 @@
/* Constants required to set up the initial stack. */
#define portINITIAL_XPSR ( 0x01000000 )
+#define portINITIAL_EXC_RETURN ( 0xfffffffdUL )
#define portINITIAL_CONTROL_IF_UNPRIVILEGED ( 0x03 )
#define portINITIAL_CONTROL_IF_PRIVILEGED ( 0x02 )
@@ -103,12 +104,31 @@
#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
#define portPRIGROUP_SHIFT ( 8UL )
+/* Constants used during system call enter and exit. */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+
/* Offsets in the stack to the parameters when inside the SVC handler. */
+#define portOFFSET_TO_LR ( 5 )
#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
/* For strict compliance with the Cortex-M spec the task start address should
* have bit-0 clear, as it is loaded into the PC on exit from an ISR. */
#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL )
+
+/* Does addr lie within [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+    ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/*
@@ -146,7 +166,7 @@
* C portion of the SVC handler. The SVC handler is split between an asm entry
* and a C wrapper for simplicity of coding and maintenance.
*/
-static void prvSVCHandler( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION;
+void vSVCHandler_C( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION;
/**
* @brief Checks whether or not the processor is privileged.
@@ -182,6 +202,53 @@
#else
void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
#endif
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/
/* Each task maintains its own interrupt status in the critical nesting
@@ -207,34 +274,91 @@
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged )
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings )
{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = 0; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
- pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */
-
if( xRunPrivileged == pdTRUE )
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED;
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED;
}
else
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED;
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED;
}
+ xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */
+ xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */
+ xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */
+ xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */
+ xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */
+ xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */
+ xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */
+ xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */
+ xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
- return pxTopOfStack;
+ xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */
+ xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */
+ xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */
+ xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */
+ xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */
+ xMPUSettings->ulContext[ 16 ] = 0; /* LR. */
+ xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */
+ xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ return &( xMPUSettings->ulContext[ 19 ] );
}
/*-----------------------------------------------------------*/
-void vPortSVCHandler( void )
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "beq vSystemCallEnter \n"
+ "cmp r2, %1 \n"
+ "beq vSystemCallEnter_1 \n"
+ "cmp r2, %2 \n"
+ "beq vSystemCallExit \n"
+ "b vSVCHandler_C \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+}
+
+#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
/* Assumes psp was in use. */
__asm volatile
@@ -248,12 +372,14 @@
" mrs r0, psp \n"
#endif
" b %0 \n"
- ::"i" ( prvSVCHandler ) : "r0", "memory"
+ ::"i" ( vSVCHandler_C ) : "r0", "memory"
);
}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
-static void prvSVCHandler( uint32_t * pulParam )
+void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
{
uint8_t ucSVCNumber;
uint32_t ulPC;
@@ -262,7 +388,7 @@
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
- * exported from linker scripts. */
+ * exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
@@ -296,7 +422,6 @@
break;
-
#if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the
* svc was raised from any of the
@@ -325,7 +450,7 @@
::: "r1", "memory"
);
break;
- #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
default: /* Unknown SVC call. */
break;
@@ -333,45 +458,311 @@
}
/*-----------------------------------------------------------*/
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vSystemCallEnter( uint32_t * pulTaskStack ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulSystemCallLocation, i;
+ const uint32_t ulStackFrameSize = 8;
+ #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Clear nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Store the value of the Link Register before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulSystemCallLocation, i;
+ const uint32_t ulStackFrameSize = 8;
+ #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+        /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Clear nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Store the value of the Link Register before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulSystemCallLocation, i;
+ const uint32_t ulStackFrameSize = 8;
+ #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " orr r1, #1 \n" /* Set nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Restore the stacked link register to what it was at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+/*-----------------------------------------------------------*/
+
static void prvRestoreContextOfFirstTask( void )
{
__asm volatile
(
- " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"
- " ldr r0, [r0] \n"
- " msr msp, r0 \n"/* Set the msp back to the start of the stack. */
- " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */
- " ldr r1, [r3] \n"
- " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */
- " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */
- " \n"
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
- " \n"
- " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers. */
- " stmia r2!, {r4-r11} \n"/* Write 4 sets of MPU registers. */
- " \n"
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- " \n"
- " ldmia r0!, {r3, r4-r11} \n"/* Pop the registers that are not automatically saved on exception entry. */
- " msr control, r3 \n"
- " msr psp, r0 \n"/* Restore the task stack pointer. */
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " ldr r14, =0xfffffffd \n"/* Load exec return code. */
- " bx r14 \n"
- " \n"
- " .ltorg \n"/* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */
- " .align 4 \n"
+ " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n"
+ " ldr r0, [r0] \n"
+ " msr msp, r0 \n" /* Set the msp back to the start of the stack. */
+ " \n"
+ /*------------ Program MPU. ------------ */
+ " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ " str r3, [r0] \n" /* Disable MPU. */
+ " \n"
+ " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " \n"
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ " str r3, [r0] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ /*---------- Restore Context. ---------- */
+ " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ " msr psp, r0 \n"
+ " stmia r0, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */
+ " msr control, r3 \n"
+ " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
+ " bx lr \n"
+ " \n"
+ " .ltorg \n" /* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */
+ " .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
@@ -585,53 +976,66 @@
__asm volatile
(
- " mrs r0, psp \n"
+ " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " ldr r1, [r2] \n" /* r1 = Location where the context should be saved. */
" \n"
- " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */
- " ldr r2, [r3] \n"
+ /*------------ Save Context. ----------- */
+ " mrs r3, control \n"
+ " mrs r0, psp \n"
+ " isb \n"
" \n"
- " mrs r1, control \n"
- " stmdb r0!, {r1, r4-r11} \n"/* Save the remaining registers. */
- " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */
+ " stmia r1!, {r3-r11, lr} \n" /* Store CONTROL register, r4-r11 and LR. */
+ " ldmia r0, {r4-r11} \n" /* Copy hardware saved context into r4-r11. */
+ " stmia r1!, {r0, r4-r11} \n" /* Store original PSP (after hardware has saved context) and the hardware saved context. */
+ " str r1, [r2] \n" /* Save the location from where the context should be restored as the first member of TCB. */
" \n"
- " stmdb sp!, {r3, r14} \n"
- " mov r0, %0 \n"
- " msr basepri, r0 \n"
- " dsb \n"
- " isb \n"
- " bl vTaskSwitchContext \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " ldmia sp!, {r3, r14} \n"
- " \n"/* Restore the context. */
- " ldr r1, [r3] \n"
- " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */
- " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */
+ /*---------- Select next task. --------- */
+ " mov r0, %0 \n"
+ " msr basepri, r0 \n"
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
" \n"
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
+ /*------------ Program MPU. ------------ */
+ " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */
" \n"
- " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers. */
- " stmia r2!, {r4-r11} \n"/* Write 4 sets of MPU registers. */
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ " str r3, [r0] \n" /* Disable MPU. */
" \n"
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
+ " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
" \n"
- " ldmia r0!, {r3, r4-r11} \n"/* Pop the registers that are not automatically saved on exception entry. */
- " msr control, r3 \n"
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ " str r3, [r0] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
" \n"
- " msr psp, r0 \n"
- " bx r14 \n"
+ /*---------- Restore Context. ---------- */
+ " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */
" \n"
- " .ltorg \n"/* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */
- " .align 4 \n"
+ " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ " msr psp, r0 \n"
+ " stmia r0, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */
+ " msr control, r3 \n"
+ " \n"
+ " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .ltorg \n" /* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */
+ " .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
@@ -816,11 +1220,19 @@
( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) |
( portMPU_REGION_ENABLE );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
+
/* Invalidate user configurable regions. */
for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ )
{
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
}
else
@@ -843,6 +1255,11 @@
( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) |
( portMPU_REGION_CACHEABLE_BUFFERABLE ) |
( portMPU_REGION_ENABLE );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) +
+ ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
}
lIndex = 0;
@@ -863,12 +1280,28 @@
( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) |
( xRegions[ lIndex ].ulParameters ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL );
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
+ if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) ||
+ ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION;
+ }
+ if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
}
else
{
/* Invalidate the region. */
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
lIndex++;
@@ -877,6 +1310,47 @@
}
/*-----------------------------------------------------------*/
+BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+{
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+}
+/*-----------------------------------------------------------*/
+
#if ( configASSERT_DEFINED == 1 )
void vPortValidateInterruptPriority( void )
@@ -936,4 +1410,4 @@
}
#endif /* configASSERT_DEFINED */
-/*-----------------------------------------------------------*/
\ No newline at end of file
+/*-----------------------------------------------------------*/
diff --git a/portable/GCC/ARM_CM3_MPU/portmacro.h b/portable/GCC/ARM_CM3_MPU/portmacro.h
index e73447b..d1f659e 100644
--- a/portable/GCC/ARM_CM3_MPU/portmacro.h
+++ b/portable/GCC/ARM_CM3_MPU/portmacro.h
@@ -104,10 +104,45 @@
uint32_t ulRegionAttribute;
} xMPU_REGION_REGISTERS;
-/* Plus 1 to create space for the stack region. */
+ typedef struct MPU_REGION_SETTINGS
+ {
+ uint32_t ulRegionStartAddress;
+ uint32_t ulRegionEndAddress;
+ uint32_t ulRegionPermissions;
+ } xMPU_REGION_SETTINGS;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ #define MAX_CONTEXT_SIZE 20
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
typedef struct MPU_SETTINGS
{
xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
} xMPU_SETTINGS;
/* Architecture specifics. */
@@ -118,9 +153,12 @@
/*-----------------------------------------------------------*/
/* SVC numbers for various services. */
- #define portSVC_START_SCHEDULER 0
- #define portSVC_YIELD 1
- #define portSVC_RAISE_PRIVILEGE 2
+ #define portSVC_START_SCHEDULER 0
+ #define portSVC_YIELD 1
+ #define portSVC_RAISE_PRIVILEGE 2
+ #define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with up to 4 parameters. */
+ #define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */
+ #define portSVC_SYSTEM_CALL_EXIT 5
/* Scheduler utilities. */
@@ -232,6 +270,16 @@
#define portRESET_PRIVILEGE() vResetPrivilege()
/*-----------------------------------------------------------*/
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+/*-----------------------------------------------------------*/
+
portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..df9239a
--- /dev/null
+++ b/portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2349 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_ulTaskGetIdleRunTimeCounterImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 is preserved across the CONTROL
+ * check via the push/pop pair. */
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_vTaskSetApplicationTaskTagImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xTaskGetApplicationTaskTagImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_vTaskSetThreadLocalStoragePointerImpl,
+ * while unprivileged callers bracket the call with
+ * portSVC_SYSTEM_CALL_ENTER/EXIT SVCs to raise and then drop privilege.
+ * r0 (first argument) is preserved across the CONTROL check. */
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_pvTaskGetThreadLocalStoragePointerImpl,
+ * while unprivileged callers bracket the call with
+ * portSVC_SYSTEM_CALL_ENTER/EXIT SVCs to raise and then drop privilege.
+ * r0 (first argument) is preserved across the CONTROL check. */
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_uxTaskGetSystemStateImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_uxTaskGetStackHighWaterMarkImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_uxTaskGetStackHighWaterMark2Impl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xTaskGetCurrentTaskHandleImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 is preserved across the CONTROL
+ * check via the push/pop pair. */
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xTaskGetSchedulerStateImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 is preserved across the CONTROL
+ * check via the push/pop pair. */
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_vTaskSetTimeOutStateImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xTaskCheckForTimeOutImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xTaskGenericNotifyImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER_1 /
+ * portSVC_SYSTEM_CALL_EXIT SVCs. The _1 entry variant is used because this
+ * API takes five arguments, so one is passed on the caller's stack
+ * (presumably the SVC handler relocates it to the privileged system-call
+ * stack - verify against the port's SVC handler). */
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xTaskGenericNotifyWaitImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER_1 /
+ * portSVC_SYSTEM_CALL_EXIT SVCs. The _1 entry variant is used because this
+ * API takes five arguments, so one is passed on the caller's stack
+ * (presumably the SVC handler relocates it to the privileged system-call
+ * stack - verify against the port's SVC handler). */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_ulTaskGenericNotifyTakeImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xTaskGenericNotifyStateClearImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_ulTaskGenericNotifyValueClearImpl,
+ * while unprivileged callers bracket the call with
+ * portSVC_SYSTEM_CALL_ENTER/EXIT SVCs to raise and then drop privilege.
+ * r0 (first argument) is preserved across the CONTROL check. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xQueueGenericSendImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. All four arguments fit in
+ * r0-r3, so the plain ENTER SVC is used. r0 is preserved across the
+ * CONTROL check via the push/pop pair. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_uxQueueMessagesWaitingImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_uxQueueSpacesAvailableImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xQueueReceiveImpl, while unprivileged
+ * callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT SVCs to
+ * raise and then drop privilege. r0 (first argument) is preserved across
+ * the CONTROL check via the push/pop pair. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xQueuePeekImpl, while unprivileged
+ * callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT SVCs to
+ * raise and then drop privilege. r0 (first argument) is preserved across
+ * the CONTROL check via the push/pop pair. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xQueueSemaphoreTakeImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xQueueGetMutexHolderImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xQueueTakeMutexRecursiveImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xQueueGiveMutexRecursiveImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xQueueSelectFromSetImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSelectFromSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_xQueueAddToSetImpl, while unprivileged
+ * callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT SVCs to
+ * raise and then drop privilege. r0 (first argument) is preserved across
+ * the CONTROL check via the push/pop pair. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueAddToSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_vQueueAddToRegistryImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueAddToRegistryImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_vQueueUnregisterQueueImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueUnregisterQueueImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_pcQueueGetNameImpl, while unprivileged
+ * callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT SVCs to
+ * raise and then drop privilege. r0 (first argument) is preserved across
+ * the CONTROL check via the push/pop pair. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcQueueGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_pvTimerGetTimerIDImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTimerGetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper. CONTROL bit 0 (nPRIV) selects the path: privileged
+ * callers tail-branch directly to MPU_vTimerSetTimerIDImpl, while
+ * unprivileged callers bracket the call with portSVC_SYSTEM_CALL_ENTER/EXIT
+ * SVCs to raise and then drop privilege. r0 (first argument) is preserved
+ * across the CONTROL check via the push/pop pair. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerIsTimerActiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGenericCommandImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTimerGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetPeriodImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetExpiryTimeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupWaitBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupClearBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSetBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSyncImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxEventGroupGetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vEventGroupSetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c
index 8a7f4c1..bbcf733 100755
--- a/portable/GCC/ARM_CM4_MPU/port.c
+++ b/portable/GCC/ARM_CM4_MPU/port.c
@@ -118,13 +118,35 @@
#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
#define portPRIGROUP_SHIFT ( 8UL )
+/* Constants used during system call enter and exit. */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+
/* Offsets in the stack to the parameters when inside the SVC handler. */
+#define portOFFSET_TO_LR ( 5 )
#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+
/* For strict compliance with the Cortex-M spec the task start address should
* have bit-0 clear, as it is loaded into the PC on exit from an ISR. */
#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL )
+/* Does addr lie within [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+    ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
/*
* Configure a number of standard MPU regions that are used by all tasks.
*/
@@ -160,7 +182,7 @@
* C portion of the SVC handler. The SVC handler is split between an asm entry
* and a C wrapper for simplicity of coding and maintenance.
*/
-static void prvSVCHandler( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION;
+void vSVCHandler_C( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION;
/*
* Function to enable the VFP.
@@ -201,6 +223,56 @@
#else
void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
#endif
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/
/* Each task maintains its own interrupt status in the critical nesting
@@ -227,39 +299,102 @@
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged )
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings )
{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = 0; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
-
- /* A save method is being used that requires each task to maintain its
- * own exec return value. */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */
-
if( xRunPrivileged == pdTRUE )
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED;
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED;
}
else
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED;
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED;
}
+ xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */
+ xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */
+ xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */
+ xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */
+ xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */
+ xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */
+ xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */
+ xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */
+ xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
- return pxTopOfStack;
+ xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */
+ xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */
+ xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */
+ xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */
+ xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */
+ xMPUSettings->ulContext[ 16 ] = 0; /* LR. */
+ xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */
+ xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ return &( xMPUSettings->ulContext[ 19 ] );
}
/*-----------------------------------------------------------*/
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "beq syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_enter_1 \n"
+ "cmp r2, %2 \n"
+ "beq syscall_exit \n"
+ "b vSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_enter_1: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter_1 \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+}
+
+#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void vPortSVCHandler( void )
{
/* Assumes psp was in use. */
@@ -274,12 +409,14 @@
" mrs r0, psp \n"
#endif
" b %0 \n"
- ::"i" ( prvSVCHandler ) : "r0", "memory"
+ ::"i" ( vSVCHandler_C ) : "r0", "memory"
);
}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
-static void prvSVCHandler( uint32_t * pulParam )
+void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
{
uint8_t ucSVCNumber;
uint32_t ulPC;
@@ -288,7 +425,7 @@
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
- * exported from linker scripts. */
+ * exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
@@ -350,7 +487,7 @@
::: "r1", "memory"
);
break;
- #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
default: /* Unknown SVC call. */
break;
@@ -358,52 +495,364 @@
}
/*-----------------------------------------------------------*/
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Clear nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Store the value of the Link Register before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Clear nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Store the value of the Link Register before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " orr r1, #1 \n" /* Set nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Restore the stacked link register to what it was at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+/*-----------------------------------------------------------*/
+
static void prvRestoreContextOfFirstTask( void )
{
__asm volatile
(
- " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */
- " ldr r0, [r0] \n"
- " ldr r0, [r0] \n"
- " msr msp, r0 \n"/* Set the msp back to the start of the stack. */
- " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */
- " ldr r1, [r3] \n"
- " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */
- " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */
- " \n"
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
- " \n"
- " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
- " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
- " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
- " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
- #endif /* configTOTAL_MPU_REGIONS == 16. */
- " \n"
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- " \n"
- " ldmia r0!, {r3-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry. */
- " msr control, r3 \n"
- " msr psp, r0 \n"/* Restore the task stack pointer. */
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " bx r14 \n"
- " \n"
- " .ltorg \n"/* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */
- " .align 4 \n"
- "pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
+ " ldr r0, [r0] \n"
+ " ldr r0, [r0] \n"
+ " msr msp, r0 \n" /* Set the msp back to the start of the stack. */
+ " \n"
+ /*------------ Program MPU. ------------ */
+ " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ " str r3, [r0] \n" /* Disable MPU. */
+ " \n"
+ " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+ #endif /* configTOTAL_MPU_REGIONS == 16. */
+ " \n"
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ " str r3, [r0] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ /*---------- Restore Context. ---------- */
+ " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ " msr psp, r0 \n"
+ " stmia r0, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */
+ " msr control, r3 \n"
+ " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
+ " bx lr \n"
+ " \n"
+ " .ltorg \n" /* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB\n"
);
}
/*-----------------------------------------------------------*/
@@ -639,76 +1088,94 @@
__asm volatile
(
- " mrs r0, psp \n"
- " isb \n"
+ " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " ldr r1, [r2] \n" /* r1 = Location where the context should be saved. */
" \n"
- " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */
- " ldr r2, [r3] \n"
+ /*------------ Save Context. ----------- */
+ " mrs r3, control \n"
+ " mrs r0, psp \n"
+ " isb \n"
" \n"
- " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, push high vfp registers. */
- " it eq \n"
- " vstmdbeq r0!, {s16-s31} \n"
+ " add r0, r0, #0x20 \n" /* Move r0 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r0, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r0, r0, #0x20 \n" /* Set r0 back to the location of hardware saved context. */
" \n"
- " mrs r1, control \n"
- " stmdb r0!, {r1, r4-r11, r14} \n"/* Save the remaining registers. */
- " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */
+ " stmia r1!, {r3-r11, lr} \n" /* Store CONTROL register, r4-r11 and LR. */
+ " ldmia r0, {r4-r11} \n" /* Copy hardware saved context into r4-r11. */
+ " stmia r1!, {r0, r4-r11} \n" /* Store original PSP (after hardware has saved context) and the hardware saved context. */
+ " str r1, [r2] \n" /* Save the location from where the context should be restored as the first member of TCB. */
" \n"
- " stmdb sp!, {r0, r3} \n"
- " mov r0, %0 \n"
- #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- #endif
- " msr basepri, r0 \n"
- " dsb \n"
- " isb \n"
- #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- #endif
- " bl vTaskSwitchContext \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"
- " ldmia sp!, {r0, r3} \n"
- " \n"/* Restore the context. */
- " ldr r1, [r3] \n"
- " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */
- " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */
+ /*---------- Select next task. --------- */
+ " mov r0, %0 \n"
+ #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
+ " cpsid i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+ #endif
+ " msr basepri, r0 \n"
+ " dsb \n"
+ " isb \n"
+ #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
+ " cpsie i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+ #endif
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"
" \n"
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- " str r3, [r2] \n"/* Disable MPU. */
+ /*------------ Program MPU. ------------ */
+ " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */
" \n"
- " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
- " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ " str r3, [r0] \n" /* Disable MPU. */
" \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
- " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
- " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
- #endif /* configTOTAL_MPU_REGIONS == 16. */
+ " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
" \n"
- " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */
- " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- " str r3, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
+ " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
+ " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+ #endif /* configTOTAL_MPU_REGIONS == 16. */
" \n"
- " ldmia r0!, {r3-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry. */
- " msr control, r3 \n"
+ " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */
+ " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */
+ " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ " str r3, [r0] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
" \n"
- " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, pop the high vfp registers too. */
- " it eq \n"
- " vldmiaeq r0!, {s16-s31} \n"
+ /*---------- Restore Context. ---------- */
+ " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */
+ " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */
+ " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */
" \n"
- " msr psp, r0 \n"
- " bx r14 \n"
+ " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ " msr psp, r0 \n"
+ " stmia r0!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */
+ " msr control, r3 \n"
+
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r0!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */
+
+ " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
" \n"
- " .ltorg \n"/* Assemble the current literal pool to avoid offset-out-of-bound errors with lto. */
- " .align 4 \n"
- "pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " .ltorg \n" /* Assemble the current literal pool to avoid offset-out-of-bound errors with lto. */
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
@@ -939,11 +1406,19 @@
( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) |
( portMPU_REGION_ENABLE );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
+
/* Invalidate user configurable regions. */
for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ )
{
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
}
else
@@ -966,6 +1441,12 @@
( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) |
( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) +
+ ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
}
lIndex = 0;
@@ -986,12 +1467,28 @@
( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) |
( xRegions[ lIndex ].ulParameters ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL );
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
+ if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) ||
+ ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION;
+ }
+ if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
}
else
{
/* Invalidate the region. */
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
lIndex++;
@@ -1000,6 +1497,47 @@
}
/*-----------------------------------------------------------*/
+BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+{
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+}
+/*-----------------------------------------------------------*/
+
#if ( configASSERT_DEFINED == 1 )
void vPortValidateInterruptPriority( void )
@@ -1059,4 +1597,4 @@
}
#endif /* configASSERT_DEFINED */
-/*-----------------------------------------------------------*/
\ No newline at end of file
+/*-----------------------------------------------------------*/
diff --git a/portable/GCC/ARM_CM4_MPU/portmacro.h b/portable/GCC/ARM_CM4_MPU/portmacro.h
index f3365d1..5417fea 100644
--- a/portable/GCC/ARM_CM4_MPU/portmacro.h
+++ b/portable/GCC/ARM_CM4_MPU/portmacro.h
@@ -193,9 +193,45 @@
uint32_t ulRegionAttribute;
} xMPU_REGION_REGISTERS;
+typedef struct MPU_REGION_SETTINGS
+{
+ uint32_t ulRegionStartAddress;
+ uint32_t ulRegionEndAddress;
+ uint32_t ulRegionPermissions;
+} xMPU_REGION_SETTINGS;
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+#define MAX_CONTEXT_SIZE 52
+
+/* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+#define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+#define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
typedef struct MPU_SETTINGS
{
xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
} xMPU_SETTINGS;
/* Architecture specifics. */
@@ -206,9 +242,12 @@
/*-----------------------------------------------------------*/
/* SVC numbers for various services. */
-#define portSVC_START_SCHEDULER 0
-#define portSVC_YIELD 1
-#define portSVC_RAISE_PRIVILEGE 2
+#define portSVC_START_SCHEDULER 0
+#define portSVC_YIELD 1
+#define portSVC_RAISE_PRIVILEGE 2
+#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 5
/* Scheduler utilities. */
@@ -320,6 +359,16 @@
#define portRESET_PRIVILEGE() vResetPrivilege()
/*-----------------------------------------------------------*/
+extern BaseType_t xPortIsTaskPrivileged( void );
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+#define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+/*-----------------------------------------------------------*/
+
portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..6e20434
--- /dev/null
+++ b/portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2349 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+                                                 UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTaskGenericNotifyStateClear: CONTROL.nPRIV == 0
+ * (privileged) tail-branches to the Impl; otherwise SVC %0 enters the
+ * system-call path, the Impl runs with privilege, and SVC %1 exits. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+                                                 UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+        " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xTaskGenericNotifyStateClearImpl \n"
+        " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+                                                UBaseType_t uxIndexToClear,
+                                                uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for ulTaskGenericNotifyValueClear: CONTROL.nPRIV == 0
+ * (privileged) tail-branches to the Impl; otherwise SVC %0 enters the
+ * system-call path, the Impl runs with privilege, and SVC %1 exits. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+                                                UBaseType_t uxIndexToClear,
+                                                uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+        " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+        " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+                                      const void * const pvItemToQueue,
+                                      TickType_t xTicksToWait,
+                                      const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueGenericSend: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. All four arguments stay
+ * in r0-r3 and reach the Impl untouched. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+                                      const void * const pvItemToQueue,
+                                      TickType_t xTicksToWait,
+                                      const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueGenericSendImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xQueueGenericSend_Unpriv \n"
+        " MPU_xQueueGenericSend_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xQueueGenericSendImpl \n"
+        " MPU_xQueueGenericSend_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueGenericSendImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for uxQueueMessagesWaiting: CONTROL.nPRIV == 0
+ * (privileged) tail-branches to the Impl; otherwise SVC %0 enters the
+ * system-call path, the Impl runs with privilege, and SVC %1 exits. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_uxQueueMessagesWaitingImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+        " MPU_uxQueueMessagesWaiting_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_uxQueueMessagesWaitingImpl \n"
+        " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_uxQueueMessagesWaitingImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for uxQueueSpacesAvailable: CONTROL.nPRIV == 0
+ * (privileged) tail-branches to the Impl; otherwise SVC %0 enters the
+ * system-call path, the Impl runs with privilege, and SVC %1 exits. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_uxQueueSpacesAvailableImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+        " MPU_uxQueueSpacesAvailable_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_uxQueueSpacesAvailableImpl \n"
+        " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_uxQueueSpacesAvailableImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+                                  void * const pvBuffer,
+                                  TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueReceive: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+                                  void * const pvBuffer,
+                                  TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueReceiveImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xQueueReceive_Unpriv \n"
+        " MPU_xQueueReceive_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xQueueReceiveImpl \n"
+        " MPU_xQueueReceive_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueReceiveImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+                               void * const pvBuffer,
+                               TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueuePeek: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+                               void * const pvBuffer,
+                               TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueuePeekImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xQueuePeek_Unpriv \n"
+        " MPU_xQueuePeek_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xQueuePeekImpl \n"
+        " MPU_xQueuePeek_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xQueuePeekImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+                                        TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueSemaphoreTake: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+                                        TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueSemaphoreTakeImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+        " MPU_xQueueSemaphoreTake_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xQueueSemaphoreTakeImpl \n"
+        " MPU_xQueueSemaphoreTake_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueSemaphoreTakeImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueGetMutexHolder: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueGetMutexHolderImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+        " MPU_xQueueGetMutexHolder_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xQueueGetMutexHolderImpl \n"
+        " MPU_xQueueGetMutexHolder_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueGetMutexHolderImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+                                             TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueTakeMutexRecursive: CONTROL.nPRIV == 0
+ * (privileged) tail-branches to the Impl; otherwise SVC %0 enters the
+ * system-call path, the Impl runs with privilege, and SVC %1 exits. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+                                             TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+        " MPU_xQueueTakeMutexRecursive_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xQueueTakeMutexRecursiveImpl \n"
+        " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueGiveMutexRecursive: CONTROL.nPRIV == 0
+ * (privileged) tail-branches to the Impl; otherwise SVC %0 enters the
+ * system-call path, the Impl runs with privilege, and SVC %1 exits. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+        " MPU_xQueueGiveMutexRecursive_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xQueueGiveMutexRecursiveImpl \n"
+        " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                    const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueSelectFromSet: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                    const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueSelectFromSetImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xQueueSelectFromSet_Unpriv \n"
+        " MPU_xQueueSelectFromSet_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xQueueSelectFromSetImpl \n"
+        " MPU_xQueueSelectFromSet_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueSelectFromSetImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                                   QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xQueueAddToSet: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                                   QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xQueueAddToSetImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xQueueAddToSet_Unpriv \n"
+        " MPU_xQueueAddToSet_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xQueueAddToSetImpl \n"
+        " MPU_xQueueAddToSet_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xQueueAddToSetImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                                  const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for vQueueAddToRegistry: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                                  const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_vQueueAddToRegistryImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_vQueueAddToRegistry_Unpriv \n"
+        " MPU_vQueueAddToRegistry_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_vQueueAddToRegistryImpl \n"
+        " MPU_vQueueAddToRegistry_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_vQueueAddToRegistryImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for vQueueUnregisterQueue: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_vQueueUnregisterQueueImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+        " MPU_vQueueUnregisterQueue_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_vQueueUnregisterQueueImpl \n"
+        " MPU_vQueueUnregisterQueue_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_vQueueUnregisterQueueImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for pcQueueGetName: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_pcQueueGetNameImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_pcQueueGetName_Unpriv \n"
+        " MPU_pcQueueGetName_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_pcQueueGetNameImpl \n"
+        " MPU_pcQueueGetName_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_pcQueueGetNameImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for pvTimerGetTimerID: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_pvTimerGetTimerIDImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_pvTimerGetTimerID_Unpriv \n"
+        " MPU_pvTimerGetTimerID_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_pvTimerGetTimerIDImpl \n"
+        " MPU_pvTimerGetTimerID_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_pvTimerGetTimerIDImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                               void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for vTimerSetTimerID: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                               void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_vTimerSetTimerIDImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_vTimerSetTimerID_Unpriv \n"
+        " MPU_vTimerSetTimerID_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_vTimerSetTimerIDImpl \n"
+        " MPU_vTimerSetTimerID_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_vTimerSetTimerIDImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTimerIsTimerActive: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTimerIsTimerActiveImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xTimerIsTimerActive_Unpriv \n"
+        " MPU_xTimerIsTimerActive_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xTimerIsTimerActiveImpl \n"
+        " MPU_xTimerIsTimerActive_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xTimerIsTimerActiveImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTimerGetTimerDaemonTaskHandle: CONTROL.nPRIV == 0
+ * (privileged) tail-branches to the Impl; otherwise SVC %0 enters the
+ * system-call path, the Impl runs with privilege, and SVC %1 exits. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+        " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+        " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                         const BaseType_t xCommandID,
+                                         const TickType_t xOptionalValue,
+                                         BaseType_t * const pxHigherPriorityTaskWoken,
+                                         const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTimerGenericCommand. Unlike the other wrappers it
+ * first reads IPSR: a non-zero value means execution is inside an exception
+ * handler, so the privileged path (direct tail-branch to the Impl) is taken.
+ * Otherwise CONTROL.nPRIV selects the path as usual. Uses
+ * portSVC_SYSTEM_CALL_ENTER_1 because the fifth argument is passed on the
+ * stack rather than in r0-r3. */
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                         const BaseType_t xCommandID,
+                                         const TickType_t xOptionalValue,
+                                         BaseType_t * const pxHigherPriorityTaskWoken,
+                                         const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTimerGenericCommandImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, ipsr \n"
+        " cmp r0, #0 \n"
+        " bne MPU_xTimerGenericCommand_Priv \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " beq MPU_xTimerGenericCommand_Priv \n"
+        " MPU_xTimerGenericCommand_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xTimerGenericCommandImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " MPU_xTimerGenericCommand_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xTimerGenericCommandImpl \n"
+        " \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for pcTimerGetName: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_pcTimerGetNameImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_pcTimerGetName_Unpriv \n"
+        " MPU_pcTimerGetName_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_pcTimerGetNameImpl \n"
+        " MPU_pcTimerGetName_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_pcTimerGetNameImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                                  const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for vTimerSetReloadMode: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                                  const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_vTimerSetReloadModeImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_vTimerSetReloadMode_Unpriv \n"
+        " MPU_vTimerSetReloadMode_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_vTimerSetReloadModeImpl \n"
+        " MPU_vTimerSetReloadMode_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_vTimerSetReloadModeImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTimerGetReloadMode: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTimerGetReloadModeImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xTimerGetReloadMode_Unpriv \n"
+        " MPU_xTimerGetReloadMode_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xTimerGetReloadModeImpl \n"
+        " MPU_xTimerGetReloadMode_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xTimerGetReloadModeImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for uxTimerGetReloadMode: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_uxTimerGetReloadModeImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+        " MPU_uxTimerGetReloadMode_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_uxTimerGetReloadModeImpl \n"
+        " MPU_uxTimerGetReloadMode_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_uxTimerGetReloadModeImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTimerGetPeriod: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTimerGetPeriodImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xTimerGetPeriod_Unpriv \n"
+        " MPU_xTimerGetPeriod_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xTimerGetPeriodImpl \n"
+        " MPU_xTimerGetPeriod_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xTimerGetPeriodImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xTimerGetExpiryTime: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xTimerGetExpiryTimeImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+        " MPU_xTimerGetExpiryTime_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xTimerGetExpiryTimeImpl \n"
+        " MPU_xTimerGetExpiryTime_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xTimerGetExpiryTimeImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                         const EventBits_t uxBitsToWaitFor,
+                                         const BaseType_t xClearOnExit,
+                                         const BaseType_t xWaitForAllBits,
+                                         TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xEventGroupWaitBits: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. Uses
+ * portSVC_SYSTEM_CALL_ENTER_1 because the fifth argument is passed on the
+ * stack rather than in r0-r3. */
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                         const EventBits_t uxBitsToWaitFor,
+                                         const BaseType_t xClearOnExit,
+                                         const BaseType_t xWaitForAllBits,
+                                         TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xEventGroupWaitBitsImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xEventGroupWaitBits_Unpriv \n"
+        " MPU_xEventGroupWaitBits_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xEventGroupWaitBitsImpl \n"
+        " MPU_xEventGroupWaitBits_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xEventGroupWaitBitsImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                          const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xEventGroupClearBits: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                          const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xEventGroupClearBitsImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xEventGroupClearBits_Unpriv \n"
+        " MPU_xEventGroupClearBits_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xEventGroupClearBitsImpl \n"
+        " MPU_xEventGroupClearBits_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xEventGroupClearBitsImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                        const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xEventGroupSetBits: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                        const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xEventGroupSetBitsImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xEventGroupSetBits_Unpriv \n"
+        " MPU_xEventGroupSetBits_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xEventGroupSetBitsImpl \n"
+        " MPU_xEventGroupSetBits_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xEventGroupSetBitsImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToSet,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xEventGroupSync: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. All four arguments fit in
+ * r0-r3, so the plain ENTER SVC number is used. */
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToSet,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xEventGroupSyncImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xEventGroupSync_Unpriv \n"
+        " MPU_xEventGroupSync_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xEventGroupSyncImpl \n"
+        " MPU_xEventGroupSync_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xEventGroupSyncImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for uxEventGroupGetNumber: CONTROL.nPRIV == 0
+ * (privileged) tail-branches to the Impl; otherwise SVC %0 enters the
+ * system-call path, the Impl runs with privilege, and SVC %1 exits. */
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_uxEventGroupGetNumberImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+        " MPU_uxEventGroupGetNumber_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_uxEventGroupGetNumberImpl \n"
+        " MPU_uxEventGroupGetNumber_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_uxEventGroupGetNumberImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+                                   UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for vEventGroupSetNumber: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+                                   UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_vEventGroupSetNumberImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_vEventGroupSetNumber_Unpriv \n"
+        " MPU_vEventGroupSetNumber_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_vEventGroupSetNumberImpl \n"
+        " MPU_vEventGroupSetNumber_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_vEventGroupSetNumberImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                                  const void * pvTxData,
+                                  size_t xDataLengthBytes,
+                                  TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xStreamBufferSend: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                                  const void * pvTxData,
+                                  size_t xDataLengthBytes,
+                                  TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xStreamBufferSendImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xStreamBufferSend_Unpriv \n"
+        " MPU_xStreamBufferSend_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xStreamBufferSendImpl \n"
+        " MPU_xStreamBufferSend_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xStreamBufferSendImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                                     void * pvRxData,
+                                     size_t xBufferLengthBytes,
+                                     TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* Naked MPU wrapper for xStreamBufferReceive: CONTROL.nPRIV == 0 (privileged)
+ * tail-branches to the Impl; otherwise SVC %0 enters the system-call path,
+ * the Impl runs with privilege, and SVC %1 exits. */
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                                     void * pvRxData,
+                                     size_t xBufferLengthBytes,
+                                     TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified \n"
+        " .extern MPU_xStreamBufferReceiveImpl \n"
+        " \n"
+        " push {r0} \n"
+        " mrs r0, control \n"
+        " tst r0, #1 \n"
+        " bne MPU_xStreamBufferReceive_Unpriv \n"
+        " MPU_xStreamBufferReceive_Priv: \n"
+        " pop {r0} \n"
+        " b MPU_xStreamBufferReceiveImpl \n"
+        " MPU_xStreamBufferReceive_Unpriv: \n"
+        " pop {r0} \n"
+        " svc %0 \n"
+        " bl MPU_xStreamBufferReceiveImpl \n"
+        " svc %1 \n"
+        " bx lr \n"
+        " \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/GCC/ARM_CM55/non_secure/port.c b/portable/GCC/ARM_CM55/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/GCC/ARM_CM55/non_secure/port.c
+++ b/portable/GCC/ARM_CM55/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/GCC/ARM_CM55/non_secure/portasm.c b/portable/GCC/ARM_CM55/non_secure/portasm.c
index 9f9b2e6..f7ec7d9 100644
--- a/portable/GCC/ARM_CM55/non_secure/portasm.c
+++ b/portable/GCC/ARM_CM55/non_secure/portasm.c
@@ -40,95 +40,120 @@
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xSecureContextConst2: .word xSecureContext \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
- " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r3, [r2] \n"/* Read pxCurrentTCB. */
- " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r3, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r2] \n"/* Program RNR = 4. */
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #8 \n"/* r4 = 8. */
- " str r4, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #12 \n"/* r4 = 12. */
- " str r4, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- " ldr r5, xSecureContextConst2 \n"
- " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " msr control, r3 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r4 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
- " ldr r4, xSecureContextConst2 \n"
- " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
- " \n"
+ " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ " ldr r4, xSecureContextConst2 \n"
+ " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */
+ " msr psplim, r2 \n" /* Set this task's PSPLIM value. */
+ " movs r1, #2 \n" /* r1 = 2. */
+ " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n" /* Discard everything up to r0. */
+ " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx r3 \n" /* Finally, branch to EXC_RETURN. */
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -236,6 +261,160 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " save_s_context: \n"
+ " push {r0-r2, lr} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r2, lr} \n"
+ " \n"
+ " save_ns_context: \n"
+ " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */
+ " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " save_general_regs: \n"
+ " mrs r3, psp \n"
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " stmia r2!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psp \n" /* r3 = PSP. */
+ " mrs r4, psplim \n" /* r4 = PSPLIM. */
+ " mrs r5, control \n" /* r5 = CONTROL. */
+ " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* No secure context to restore. */
+ " \n"
+ " restore_s_context: \n"
+ " push {r1-r3, lr} \n"
+ " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r1-r3, lr} \n"
+ " \n"
+ " restore_ns_context: \n"
+ " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */
+ " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " restore_context_done: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xSecureContextConst: .word xSecureContext \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -260,20 +439,11 @@
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB.*/
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
@@ -284,26 +454,14 @@
" it eq \n"
" vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #16 \n"/* r2 = r2 + 16. */
- " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " subs r2, r2, #16 \n"/* r2 = r2 - 16. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #12 \n"/* r2 = r2 + 12. */
- " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " subs r2, r2, #12 \n"/* r2 = r2 - 12. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " adds r2, r2, #12 \n"/* r2 = r2 + 12. */
+ " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " subs r2, r2, #12 \n"/* r2 = r2 - 12. */
+ " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" \n"
" select_next_task: \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
@@ -318,83 +476,22 @@
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r3] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r3] \n"/* Program MAIR0. */
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r3] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #8 \n"/* r4 = 8. */
- " str r4, [r3] \n"/* Program RNR = 8. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #12 \n"/* r4 = 12. */
- " str r4, [r3] \n"/* Program RNR = 12. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r3] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r3 \n"/* Restore the CONTROL register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #else /* configENABLE_MPU */
- " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #endif /* configENABLE_MPU */
+ " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
+ " mov lr, r4 \n"/* LR = r4. */
+ " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r3] \n"/* Restore the task's xSecureContext. */
+ " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
+ " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
+ " push {r2, r4} \n"
+ " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r2, r4} \n"
+ " mov lr, r4 \n"/* LR = r4. */
+ " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " msr psp, r2 \n"/* Remember the new top of stack for the task. */
+ " bx lr \n"
" \n"
" restore_ns_context: \n"
" ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */
@@ -409,17 +506,60 @@
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xSecureContextConst: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "beq syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_enter_1 \n"
+ "cmp r2, %2 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_enter_1: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter_1 \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -437,6 +577,8 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
diff --git a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h
+++ b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..6e20434
--- /dev/null
+++ b/portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2349 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+/* MPU wrapper for uxTaskGetStackHighWaterMark2. Privileged callers ( CONTROL
+ * bit 0, nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter
+ * and exit the privileged system call context via SVC. r0 is preserved
+ * around the CONTROL check. */
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+/* MPU wrapper for xTaskGetCurrentTaskHandle. Privileged callers ( CONTROL
+ * bit 0, nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter
+ * and exit the privileged system call context via SVC. r0 is used as scratch
+ * for the CONTROL check and is preserved. */
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+/* MPU wrapper for xTaskGetSchedulerState. Privileged callers ( CONTROL
+ * bit 0, nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter
+ * and exit the privileged system call context via SVC. r0 is used as scratch
+ * for the CONTROL check and is preserved. */
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for vTaskSetTimeOutState. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries pxTimeOut and
+ * is preserved around the CONTROL check. */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xTaskCheckForTimeOut. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries pxTimeOut and
+ * is preserved around the CONTROL check. */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for xTaskGenericNotify. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC.
+ * NOTE(review): uses portSVC_SYSTEM_CALL_ENTER_1 rather than ..._ENTER —
+ * appears to be the entry variant for calls with more than four arguments
+ * ( the fifth is passed on the stack ); confirm against the port's SVC
+ * handler. */
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for xTaskGenericNotifyWait. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC.
+ * NOTE(review): uses portSVC_SYSTEM_CALL_ENTER_1 — appears to be the entry
+ * variant for calls with more than four arguments ( the fifth is on the
+ * stack ); confirm against the port's SVC handler. */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for ulTaskGenericNotifyTake. Privileged callers ( CONTROL
+ * bit 0, nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter
+ * and exit the privileged system call context via SVC. r0 carries the first
+ * argument and is preserved around the CONTROL check. */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for xTaskGenericNotifyStateClear. Privileged callers ( CONTROL
+ * bit 0, nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter
+ * and exit the privileged system call context via SVC. r0 carries xTask and
+ * is preserved around the CONTROL check. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for ulTaskGenericNotifyValueClear. Privileged callers ( CONTROL
+ * bit 0, nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter
+ * and exit the privileged system call context via SVC. r0 carries xTask and
+ * is preserved around the CONTROL check. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueueGenericSend. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xQueue and is
+ * preserved around the CONTROL check. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for uxQueueMessagesWaiting. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xQueue and is
+ * preserved around the CONTROL check. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for uxQueueSpacesAvailable. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xQueue and is
+ * preserved around the CONTROL check. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueueReceive. Privileged callers ( CONTROL bit 0, nPRIV,
+ * clear ) tail-branch to the Impl; unprivileged callers enter and exit the
+ * privileged system call context via SVC. r0 carries xQueue and is preserved
+ * around the CONTROL check. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueuePeek. Privileged callers ( CONTROL bit 0, nPRIV,
+ * clear ) tail-branch to the Impl; unprivileged callers enter and exit the
+ * privileged system call context via SVC. r0 carries xQueue and is preserved
+ * around the CONTROL check. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueueSemaphoreTake. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xQueue and is
+ * preserved around the CONTROL check. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+/* MPU wrapper for xQueueGetMutexHolder. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xSemaphore and
+ * is preserved around the CONTROL check. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* MPU wrapper for xQueueTakeMutexRecursive. Privileged callers ( CONTROL
+ * bit 0, nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter
+ * and exit the privileged system call context via SVC. r0 carries xMutex and
+ * is preserved around the CONTROL check. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* MPU wrapper for xQueueGiveMutexRecursive. Privileged callers ( CONTROL
+ * bit 0, nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter
+ * and exit the privileged system call context via SVC. r0 carries pxMutex
+ * and is preserved around the CONTROL check. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* MPU wrapper for xQueueSelectFromSet. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xQueueSet and
+ * is preserved around the CONTROL check. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSelectFromSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* MPU wrapper for xQueueAddToSet. Privileged callers ( CONTROL bit 0, nPRIV,
+ * clear ) tail-branch to the Impl; unprivileged callers enter and exit the
+ * privileged system call context via SVC. r0 carries xQueueOrSemaphore and
+ * is preserved around the CONTROL check. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueAddToSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* MPU wrapper for vQueueAddToRegistry. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xQueue and is
+ * preserved around the CONTROL check. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueAddToRegistryImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* MPU wrapper for vQueueUnregisterQueue. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xQueue and is
+ * preserved around the CONTROL check. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueUnregisterQueueImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* MPU wrapper for pcQueueGetName. Privileged callers ( CONTROL bit 0, nPRIV,
+ * clear ) tail-branch to the Impl; unprivileged callers enter and exit the
+ * privileged system call context via SVC. r0 carries xQueue and is preserved
+ * around the CONTROL check. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcQueueGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for pvTimerGetTimerID. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xTimer and is
+ * preserved around the CONTROL check. */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTimerGetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for vTimerSetTimerID. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xTimer and is
+ * preserved around the CONTROL check. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerIsTimerActive. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xTimer and is
+ * preserved around the CONTROL check. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerIsTimerActiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerGetTimerDaemonTaskHandle. Privileged callers
+ * ( CONTROL bit 0, nPRIV, clear ) tail-branch to the Impl; unprivileged
+ * callers enter and exit the privileged system call context via SVC. r0 is
+ * used as scratch for the CONTROL check and is preserved. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerGenericCommand. Unlike the other wrappers this one
+ * first reads IPSR: a non-zero IPSR means the call is made from an exception
+ * ( interrupt ) handler, which is routed to the privileged path directly.
+ * Only a thread-mode caller with CONTROL bit 0 ( nPRIV ) set goes through
+ * the SVC enter/exit sequence.
+ * NOTE(review): uses portSVC_SYSTEM_CALL_ENTER_1 — appears to be the entry
+ * variant for calls with more than four arguments ( the fifth is on the
+ * stack ); confirm against the port's SVC handler. */
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGenericCommandImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for pcTimerGetName. Privileged callers ( CONTROL bit 0, nPRIV,
+ * clear ) tail-branch to the Impl; unprivileged callers enter and exit the
+ * privileged system call context via SVC. r0 carries xTimer and is preserved
+ * around the CONTROL check. */
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTimerGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for vTimerSetReloadMode. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xTimer and is
+ * preserved around the CONTROL check. */
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerGetReloadMode. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xTimer and is
+ * preserved around the CONTROL check. */
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for uxTimerGetReloadMode. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xTimer and is
+ * preserved around the CONTROL check. */
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerGetPeriod. Privileged callers ( CONTROL bit 0,
+ * nPRIV, clear ) tail-branch to the Impl; unprivileged callers enter and
+ * exit the privileged system call context via SVC. r0 carries xTimer and is
+ * preserved around the CONTROL check. */
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetPeriodImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xTimerGetExpiryTime. Naked function - the body
+ * is assembly only. A privileged caller branches straight to the
+ * implementation; an unprivileged caller brackets the call with SVCs that
+ * switch to/from the privileged system call stack. */
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xTimerGetExpiryTimeImpl                   \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xTimerGetExpiryTime_Unpriv                    \n"
+        " MPU_xTimerGetExpiryTime_Priv:                         \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xTimerGetExpiryTimeImpl                     \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xTimerGetExpiryTime_Unpriv:                       \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_xTimerGetExpiryTimeImpl                    \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     const BaseType_t xClearOnExit,
+                                     const BaseType_t xWaitForAllBits,
+                                     TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xEventGroupWaitBits. Naked function - the body
+ * is assembly only. This call takes FIVE parameters, so the fifth is passed
+ * on the stack; the entry SVC therefore uses portSVC_SYSTEM_CALL_ENTER_1,
+ * whose handler copies the stacked parameter to the system call stack as
+ * well as the exception frame. */
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     const BaseType_t xClearOnExit,
+                                     const BaseType_t xWaitForAllBits,
+                                     TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xEventGroupWaitBitsImpl                   \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xEventGroupWaitBits_Unpriv                    \n"
+        " MPU_xEventGroupWaitBits_Priv:                         \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xEventGroupWaitBitsImpl                     \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xEventGroupWaitBits_Unpriv:                       \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER_1 - 5-parameter entry: also copies the stacked parameter. */
+        "     bl MPU_xEventGroupWaitBitsImpl                    \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                      const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xEventGroupClearBits. Naked function - the body
+ * is assembly only. A privileged caller branches straight to the
+ * implementation; an unprivileged caller brackets the call with SVCs that
+ * switch to/from the privileged system call stack. */
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                      const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xEventGroupClearBitsImpl                  \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xEventGroupClearBits_Unpriv                   \n"
+        " MPU_xEventGroupClearBits_Priv:                        \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xEventGroupClearBitsImpl                    \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xEventGroupClearBits_Unpriv:                      \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_xEventGroupClearBitsImpl                   \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                    const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xEventGroupSetBits. Naked function - the body
+ * is assembly only. A privileged caller branches straight to the
+ * implementation; an unprivileged caller brackets the call with SVCs that
+ * switch to/from the privileged system call stack. */
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                    const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xEventGroupSetBitsImpl                    \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xEventGroupSetBits_Unpriv                     \n"
+        " MPU_xEventGroupSetBits_Priv:                          \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xEventGroupSetBitsImpl                      \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xEventGroupSetBits_Unpriv:                        \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_xEventGroupSetBitsImpl                     \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                 const EventBits_t uxBitsToSet,
+                                 const EventBits_t uxBitsToWaitFor,
+                                 TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xEventGroupSync. Naked function - the body is
+ * assembly only. All four parameters arrive in r0-r3 and flow through to the
+ * implementation untouched. A privileged caller branches straight to the
+ * implementation; an unprivileged caller brackets the call with SVCs. */
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                 const EventBits_t uxBitsToSet,
+                                 const EventBits_t uxBitsToWaitFor,
+                                 TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xEventGroupSyncImpl                       \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xEventGroupSync_Unpriv                        \n"
+        " MPU_xEventGroupSync_Priv:                             \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xEventGroupSyncImpl                         \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xEventGroupSync_Unpriv:                           \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_xEventGroupSyncImpl                        \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for uxEventGroupGetNumber (trace facility only).
+ * Naked function - the body is assembly only. A privileged caller branches
+ * straight to the implementation; an unprivileged caller brackets the call
+ * with SVCs that switch to/from the privileged system call stack. */
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_uxEventGroupGetNumberImpl                 \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_uxEventGroupGetNumber_Unpriv                  \n"
+        " MPU_uxEventGroupGetNumber_Priv:                       \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_uxEventGroupGetNumberImpl                   \n" /* Already privileged - tail-call the implementation. */
+        " MPU_uxEventGroupGetNumber_Unpriv:                     \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_uxEventGroupGetNumberImpl                  \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+                               UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for vEventGroupSetNumber (trace facility only).
+ * Naked function - the body is assembly only. A privileged caller branches
+ * straight to the implementation; an unprivileged caller brackets the call
+ * with SVCs that switch to/from the privileged system call stack. */
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+                               UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_vEventGroupSetNumberImpl                  \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_vEventGroupSetNumber_Unpriv                   \n"
+        " MPU_vEventGroupSetNumber_Priv:                        \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_vEventGroupSetNumberImpl                    \n" /* Already privileged - tail-call the implementation. */
+        " MPU_vEventGroupSetNumber_Unpriv:                      \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_vEventGroupSetNumberImpl                   \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                              const void * pvTxData,
+                              size_t xDataLengthBytes,
+                              TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xStreamBufferSend. Naked function - the body is
+ * assembly only. All four parameters arrive in r0-r3 and flow through to the
+ * implementation untouched. A privileged caller branches straight to the
+ * implementation; an unprivileged caller brackets the call with SVCs. */
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                              const void * pvTxData,
+                              size_t xDataLengthBytes,
+                              TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xStreamBufferSendImpl                     \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xStreamBufferSend_Unpriv                      \n"
+        " MPU_xStreamBufferSend_Priv:                           \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xStreamBufferSendImpl                       \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xStreamBufferSend_Unpriv:                         \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_xStreamBufferSendImpl                      \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                                 void * pvRxData,
+                                 size_t xBufferLengthBytes,
+                                 TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xStreamBufferReceive. Naked function - the body
+ * is assembly only. All four parameters arrive in r0-r3 and flow through to
+ * the implementation untouched. A privileged caller branches straight to the
+ * implementation; an unprivileged caller brackets the call with SVCs. */
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                                 void * pvRxData,
+                                 size_t xBufferLengthBytes,
+                                 TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xStreamBufferReceiveImpl                  \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xStreamBufferReceive_Unpriv                   \n"
+        " MPU_xStreamBufferReceive_Priv:                        \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xStreamBufferReceiveImpl                    \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xStreamBufferReceive_Unpriv:                      \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_xStreamBufferReceiveImpl                   \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xStreamBufferIsFull. Naked function - the body
+ * is assembly only. A privileged caller branches straight to the
+ * implementation; an unprivileged caller brackets the call with SVCs that
+ * switch to/from the privileged system call stack. */
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xStreamBufferIsFullImpl                   \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xStreamBufferIsFull_Unpriv                    \n"
+        " MPU_xStreamBufferIsFull_Priv:                         \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xStreamBufferIsFullImpl                     \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xStreamBufferIsFull_Unpriv:                       \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_xStreamBufferIsFullImpl                    \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xStreamBufferIsEmpty. Naked function - the body
+ * is assembly only. A privileged caller branches straight to the
+ * implementation; an unprivileged caller brackets the call with SVCs that
+ * switch to/from the privileged system call stack. */
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xStreamBufferIsEmptyImpl                  \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xStreamBufferIsEmpty_Unpriv                   \n"
+        " MPU_xStreamBufferIsEmpty_Priv:                        \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xStreamBufferIsEmptyImpl                    \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xStreamBufferIsEmpty_Unpriv:                      \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_xStreamBufferIsEmptyImpl                   \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xStreamBufferSpacesAvailable. Naked function -
+ * the body is assembly only. A privileged caller branches straight to the
+ * implementation; an unprivileged caller brackets the call with SVCs that
+ * switch to/from the privileged system call stack. */
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xStreamBufferSpacesAvailableImpl          \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xStreamBufferSpacesAvailable_Unpriv           \n"
+        " MPU_xStreamBufferSpacesAvailable_Priv:                \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xStreamBufferSpacesAvailableImpl            \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xStreamBufferSpacesAvailable_Unpriv:              \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_xStreamBufferSpacesAvailableImpl           \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xStreamBufferBytesAvailable. Naked function -
+ * the body is assembly only. A privileged caller branches straight to the
+ * implementation; an unprivileged caller brackets the call with SVCs that
+ * switch to/from the privileged system call stack. */
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xStreamBufferBytesAvailableImpl           \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xStreamBufferBytesAvailable_Unpriv            \n"
+        " MPU_xStreamBufferBytesAvailable_Priv:                 \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xStreamBufferBytesAvailableImpl             \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xStreamBufferBytesAvailable_Unpriv:               \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_xStreamBufferBytesAvailableImpl            \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+                                             size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xStreamBufferSetTriggerLevel. Naked function -
+ * the body is assembly only. A privileged caller branches straight to the
+ * implementation; an unprivileged caller brackets the call with SVCs that
+ * switch to/from the privileged system call stack. */
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+                                             size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xStreamBufferSetTriggerLevelImpl          \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xStreamBufferSetTriggerLevel_Unpriv           \n"
+        " MPU_xStreamBufferSetTriggerLevel_Priv:                \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xStreamBufferSetTriggerLevelImpl            \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xStreamBufferSetTriggerLevel_Unpriv:              \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_xStreamBufferSetTriggerLevelImpl           \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* System call trampoline for xStreamBufferNextMessageLengthBytes. Naked
+ * function - the body is assembly only. A privileged caller branches straight
+ * to the implementation; an unprivileged caller brackets the call with SVCs
+ * that switch to/from the privileged system call stack. */
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+    __asm volatile
+    (
+        " .syntax unified                                       \n"
+        " .extern MPU_xStreamBufferNextMessageLengthBytesImpl   \n"
+        "                                                       \n"
+        " push {r0}                                             \n" /* r0 is used as scratch below - preserve it. */
+        " mrs r0, control                                       \n" /* Read the CONTROL register. */
+        " tst r0, #1                                            \n" /* nPRIV bit set means the caller is unprivileged. */
+        " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv    \n"
+        " MPU_xStreamBufferNextMessageLengthBytes_Priv:         \n"
+        "     pop {r0}                                          \n"
+        "     b MPU_xStreamBufferNextMessageLengthBytesImpl     \n" /* Already privileged - tail-call the implementation. */
+        " MPU_xStreamBufferNextMessageLengthBytes_Unpriv:       \n"
+        "     pop {r0}                                          \n"
+        "     svc %0                                            \n" /* portSVC_SYSTEM_CALL_ENTER - raise privilege, switch to system call stack. */
+        "     bl MPU_xStreamBufferNextMessageLengthBytesImpl    \n"
+        "     svc %1                                            \n" /* portSVC_SYSTEM_CALL_EXIT - drop privilege, restore task stack. */
+        "     bx lr                                             \n"
+        "                                                       \n"
+        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c
+++ b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+    ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. The limit is inclusive,
+ * so the bits below the 32-byte granule are filled with ones. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+    ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range (inclusive)? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+    ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? Every
+ * parameter is parenthesized in the expansion so that expression
+ * arguments (e.g. ored-together flags) bind correctly. */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+    ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+    /* Translate the access-permission bits of an MPU Region Base Address
+     * Register (RBAR) value into the tskMPU_* permission flags. Read-only
+     * regions yield read permission; read-write regions yield read and
+     * write permission; any other encoding (privileged-only access) yields
+     * 0, i.e. no unprivileged access. The two conditions below are mutually
+     * exclusive - at most one assignment executes. */
+    static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulAccessPermissions = 0;
+
+        if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+        {
+            ulAccessPermissions = tskMPU_READ_PERMISSION;
+        }
+
+        if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+        {
+            ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+        }
+
+        return ulAccessPermissions;
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* SVC-entry path for system calls with up to four (register) parameters.
+ * Copies the exception stack frame from the task stack to the task's
+ * privileged system call stack, switches PSP/PSPLIM to that stack, records
+ * the state needed later by vSystemCallExit(), and raises privilege for the
+ * duration of the system call. Only honoured when the SVC was raised from
+ * the system call (trampoline) flash section. */
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+    extern TaskHandle_t pxCurrentTCB;
+    xMPU_SETTINGS * pxMpuSettings;
+    uint32_t * pulSystemCallStack;
+    uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+    #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+         * exported from linker scripts. */
+        extern uint32_t * __syscalls_flash_start__;
+        extern uint32_t * __syscalls_flash_end__;
+    #else
+        /* Declaration when these variables are exported from linker scripts. */
+        extern uint32_t __syscalls_flash_start__[];
+        extern uint32_t __syscalls_flash_end__[];
+    #endif /* #if defined( __ARMCC_VERSION ) */
+
+    ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+    /* If the request did not come from the system call section, do nothing. */
+    if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+        ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+    {
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+        pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+        /* This is not NULL only for the duration of the system call. */
+        configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+        #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+        {
+            if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+            {
+                /* Extended frame i.e. FPU in use. */
+                ulStackFrameSize = 26;
+                __asm volatile (
+                    " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+                    ::: "memory"
+                );
+            }
+            else
+            {
+                /* Standard frame i.e. FPU not in use. */
+                ulStackFrameSize = 8;
+            }
+        }
+        #else
+        {
+            ulStackFrameSize = 8;
+        }
+        #endif /* configENABLE_FPU || configENABLE_MVE */
+
+        /* Make space on the system call stack for the stack frame. */
+        pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+        /* Copy the stack frame. */
+        for( i = 0; i < ulStackFrameSize; i++ )
+        {
+            pulSystemCallStack[ i ] = pulTaskStack[ i ];
+        }
+
+        /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+         * restore it when we exit from the system call. */
+        pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+        __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+        /* Use the pulSystemCallStack in thread mode. */
+        __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+        __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+        /* Remember the location where we should copy the stack frame when we exit from
+         * the system call. */
+        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+        /* Record if the hardware used padding to force the stack pointer
+         * to be double word aligned. */
+        if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+        {
+            pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+        }
+        else
+        {
+            pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+        }
+
+        /* We ensure in pxPortInitialiseStack that the system call stack is
+         * double word aligned and therefore, there is no need of padding.
+         * Clear the bit[9] of stacked xPSR. */
+        pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+        /* Raise the privilege for the duration of the system call. */
+        __asm volatile (
+            " mrs r0, control \n" /* Obtain current control value. */
+            " movs r1, #1 \n" /* r1 = 1. */
+            " bics r0, r1 \n" /* Clear nPRIV bit. */
+            " msr control, r0 \n" /* Write back new control value. */
+            ::: "r0", "r1", "memory"
+        );
+    }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* SVC-entry path for system calls with FIVE parameters. Identical to
+ * vSystemCallEnter() except that the fifth parameter, which the AAPCS passes
+ * on the stack, is also copied to the system call stack (two slots are
+ * reserved to keep the stack double word aligned). Only honoured when the
+ * SVC was raised from the system call (trampoline) flash section. */
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+    extern TaskHandle_t pxCurrentTCB;
+    xMPU_SETTINGS * pxMpuSettings;
+    uint32_t * pulSystemCallStack;
+    uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+    #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+         * exported from linker scripts. */
+        extern uint32_t * __syscalls_flash_start__;
+        extern uint32_t * __syscalls_flash_end__;
+    #else
+        /* Declaration when these variables are exported from linker scripts. */
+        extern uint32_t __syscalls_flash_start__[];
+        extern uint32_t __syscalls_flash_end__[];
+    #endif /* #if defined( __ARMCC_VERSION ) */
+
+    ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+    /* If the request did not come from the system call section, do nothing. */
+    if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+        ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+    {
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+        pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+        /* This is not NULL only for the duration of the system call. */
+        configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+        #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+        {
+            if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+            {
+                /* Extended frame i.e. FPU in use. */
+                ulStackFrameSize = 26;
+                __asm volatile (
+                    " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+                    ::: "memory"
+                );
+            }
+            else
+            {
+                /* Standard frame i.e. FPU not in use. */
+                ulStackFrameSize = 8;
+            }
+        }
+        #else
+        {
+            ulStackFrameSize = 8;
+        }
+        #endif /* configENABLE_FPU || configENABLE_MVE */
+
+        /* Make space on the system call stack for the stack frame and
+         * the parameter passed on the stack. We only need to copy one
+         * parameter but we still reserve 2 spaces to keep the stack
+         * double word aligned. */
+        pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+        /* Copy the stack frame. */
+        for( i = 0; i < ulStackFrameSize; i++ )
+        {
+            pulSystemCallStack[ i ] = pulTaskStack[ i ];
+        }
+
+        /* Copy the parameter that is passed on the stack. If the hardware
+         * inserted a padding word, the parameter sits one slot further up. */
+        if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+        {
+            pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+        }
+        else
+        {
+            pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+        }
+
+        /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+         * We need to restore it when we exit from the system call. */
+        pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+        __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+        /* Use the pulSystemCallStack in thread mode. */
+        __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+        __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+        /* Remember the location where we should copy the stack frame when we exit from
+         * the system call. */
+        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+        /* We ensure in pxPortInitialiseStack that the system call stack is
+         * double word aligned and therefore, there is no need of padding.
+         * Clear the bit[9] of stacked xPSR. */
+        pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+        /* Raise the privilege for the duration of the system call. */
+        __asm volatile (
+            " mrs r0, control \n" /* Obtain current control value. */
+            " movs r1, #1 \n" /* r1 = 1. */
+            " bics r0, r1 \n" /* Clear nPRIV bit. */
+            " msr control, r0 \n" /* Write back new control value. */
+            ::: "r0", "r1", "memory"
+        );
+    }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* SVC-exit path for system calls. Copies the exception stack frame back from
+ * the system call stack to the task stack recorded by vSystemCallEnter(),
+ * restores PSP/PSPLIM and the caller's LR, re-applies the stack-padding
+ * xPSR bit, and drops privilege before returning to thread mode. Only
+ * honoured when the SVC was raised from the system call (trampoline) flash
+ * section. */
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+    extern TaskHandle_t pxCurrentTCB;
+    xMPU_SETTINGS * pxMpuSettings;
+    uint32_t * pulTaskStack;
+    uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+    #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+         * exported from linker scripts. */
+        extern uint32_t * __syscalls_flash_start__;
+        extern uint32_t * __syscalls_flash_end__;
+    #else
+        /* Declaration when these variables are exported from linker scripts. */
+        extern uint32_t __syscalls_flash_start__[];
+        extern uint32_t __syscalls_flash_end__[];
+    #endif /* #if defined( __ARMCC_VERSION ) */
+
+    ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+    /* If the request did not come from the system call section, do nothing. */
+    if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+        ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+    {
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+        pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+        #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+        {
+            if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+            {
+                /* Extended frame i.e. FPU in use. */
+                ulStackFrameSize = 26;
+                __asm volatile (
+                    " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+                    ::: "memory"
+                );
+            }
+            else
+            {
+                /* Standard frame i.e. FPU not in use. */
+                ulStackFrameSize = 8;
+            }
+        }
+        #else
+        {
+            ulStackFrameSize = 8;
+        }
+        #endif /* configENABLE_FPU || configENABLE_MVE */
+
+        /* Make space on the task stack for the stack frame. */
+        pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+        /* Copy the stack frame. */
+        for( i = 0; i < ulStackFrameSize; i++ )
+        {
+            pulTaskStack[ i ] = pulSystemCallStack[ i ];
+        }
+
+        /* Use the pulTaskStack in thread mode. */
+        __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+        /* Restore the LR and PSPLIM to what they were at the time of
+         * system call entry. */
+        pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+        __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+        /* If the hardware used padding to force the stack pointer
+         * to be double word aligned, set the stacked xPSR bit[9],
+         * otherwise clear it. */
+        if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+        {
+            pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+        }
+        else
+        {
+            pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+        }
+
+        /* This is not NULL only for the duration of the system call. */
+        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+        /* Drop the privilege before returning to the thread mode. */
+        __asm volatile (
+            " mrs r0, control \n" /* Obtain current control value. */
+            " movs r1, #1 \n" /* r1 = 1. */
+            " orrs r0, r1 \n" /* Set nPRIV bit. */
+            " msr control, r0 \n" /* Write back new control value. */
+            ::: "r0", "r1", "memory"
+        );
+    }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c
index a78529d..504b6bf 100644
--- a/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c
+++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c
@@ -40,6 +40,88 @@
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -50,80 +132,23 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r3, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #4 \n"/* r3 = 4. */
- " str r3, [r2] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #8 \n"/* r3 = 8. */
- " str r3, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #12 \n"/* r3 = 12. */
- " str r3, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " msr control, r2 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r2 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
+ " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
+ " movs r1, #2 \n"/* r1 = 2. */
+ " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n"/* Discard everything up to r0. */
+ " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
+ " bx r2 \n"/* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -231,6 +256,129 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " \n"
+ " save_general_regs: \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " stmia r1!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psplim \n" /* r3 = PSPLIM. */
+ " mrs r4, control \n" /* r4 = CONTROL. */
+ " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " restore_context_done: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -238,21 +386,16 @@
" .syntax unified \n"
" \n"
" mrs r0, psp \n"/* Read PSP in r0. */
+ " \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r2, control \n"/* r2 = CONTROL. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
- #else /* configENABLE_MPU */
- " mrs r2, psplim \n"/* r2 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
- #endif /* configENABLE_MPU */
+ " \n"
+ " mrs r2, psplim \n"/* r2 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
@@ -270,52 +413,7 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */
- " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r3, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #4 \n"/* r3 = 4. */
- " str r3, [r2] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #8 \n"/* r3 = 8. */
- " str r3, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #12 \n"/* r3 = 12. */
- " str r3, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
- #else /* configENABLE_MPU */
- " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
- #endif /* configENABLE_MPU */
+ " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
@@ -323,28 +421,66 @@
" vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
- #if ( configENABLE_MPU == 1 )
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r2 \n"/* Restore the CONTROL register value for the task. */
- #else /* configENABLE_MPU */
- " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
- #endif /* configENABLE_MPU */
+ " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "beq syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_enter_1 \n"
+ "cmp r2, %2 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_enter_1: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter_1 \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -362,4 +498,6 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h
+++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..6e20434
--- /dev/null
+++ b/portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2349 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for vTaskSetTimeOutState(). Naked function: r0 (first
+ * argument, pxTimeOut) is pushed/popped around the CONTROL read so it
+ * reaches the Impl intact. CONTROL bit 0 (nPRIV) clear -> privileged
+ * caller, tail-call the Impl; set -> unprivileged caller, enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, run the Impl with privilege, return via
+ * SVC portSVC_SYSTEM_CALL_EXIT. */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xTaskCheckForTimeOut(). Naked function: r0 (pxTimeOut)
+ * is pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for xTaskGenericNotify(). Same CONTROL.nPRIV check as the
+ * other wrappers, but the unprivileged path enters via
+ * portSVC_SYSTEM_CALL_ENTER_1 — the variant used by every five-argument
+ * wrapper in this file (presumably because the fifth argument travels on
+ * the caller's stack — TODO confirm against the SVC handler). */
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for xTaskGenericNotifyWait(). Same CONTROL.nPRIV check as
+ * the other wrappers; uses the portSVC_SYSTEM_CALL_ENTER_1 entry like the
+ * other five-argument wrappers in this file (fifth argument presumably on
+ * the caller's stack — TODO confirm against the SVC handler). */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for ulTaskGenericNotifyTake(). Naked function: r0 is
+ * pushed/popped around the CONTROL read so the first argument reaches the
+ * Impl intact. CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call
+ * the Impl; set -> unprivileged caller, enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, run the Impl with privilege, return via
+ * SVC portSVC_SYSTEM_CALL_EXIT. */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for xTaskGenericNotifyStateClear(). Naked function: r0 is
+ * pushed/popped around the CONTROL read so the first argument reaches the
+ * Impl intact. CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call
+ * the Impl; set -> unprivileged caller, enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, run the Impl with privilege, return via
+ * SVC portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* MPU wrapper for ulTaskGenericNotifyValueClear(). Naked function: r0 is
+ * pushed/popped around the CONTROL read so the first argument reaches the
+ * Impl intact. CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call
+ * the Impl; set -> unprivileged caller, enter via SVC
+ * portSVC_SYSTEM_CALL_ENTER, run the Impl with privilege, return via
+ * SVC portSVC_SYSTEM_CALL_EXIT. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueueGenericSend(). Naked function: r0 (xQueue) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT.
+ * Four arguments fit in r0-r3, so the plain ENTER variant is used. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for uxQueueMessagesWaiting(). Naked function: r0 (xQueue) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for uxQueueSpacesAvailable(). Naked function: r0 (xQueue) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueueReceive(). Naked function: r0 (xQueue) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueuePeek(). Naked function: r0 (xQueue) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xQueueSemaphoreTake(). Naked function: r0 (xQueue) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+/* MPU wrapper for xQueueGetMutexHolder(). Naked function: r0 (xSemaphore)
+ * is pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* MPU wrapper for xQueueTakeMutexRecursive(). Naked function: r0 (xMutex)
+ * is pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* MPU wrapper for xQueueGiveMutexRecursive(). Naked function: r0 (pxMutex)
+ * is pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* MPU wrapper for xQueueSelectFromSet(). Naked function: r0 (xQueueSet) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSelectFromSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* MPU wrapper for xQueueAddToSet(). Naked function: r0 (xQueueOrSemaphore)
+ * is pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueAddToSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* MPU wrapper for vQueueAddToRegistry(). Naked function: r0 (xQueue) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueAddToRegistryImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* MPU wrapper for vQueueUnregisterQueue(). Naked function: r0 (xQueue) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueUnregisterQueueImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* MPU wrapper for pcQueueGetName(). Naked function: r0 (xQueue) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcQueueGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for pvTimerGetTimerID(). Naked function: r0 (xTimer) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTimerGetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for vTimerSetTimerID(). Naked function: r0 (xTimer) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerIsTimerActive(). Naked function: r0 (xTimer) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerIsTimerActiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerGetTimerDaemonTaskHandle(). Naked function: r0 is
+ * pushed/popped around the CONTROL read so it is not clobbered. CONTROL
+ * bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl directly;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerGenericCommand(). Unlike the other wrappers this
+ * one first reads IPSR: a non-zero value means exception (ISR) context,
+ * which always takes the privileged path and tail-calls the Impl.
+ * Otherwise CONTROL bit 0 (nPRIV) decides: clear -> tail-call the Impl;
+ * set -> enter via SVC portSVC_SYSTEM_CALL_ENTER_1 (the variant used by
+ * the five-argument wrappers in this file), run the Impl with privilege,
+ * return via SVC portSVC_SYSTEM_CALL_EXIT. r0 is preserved around both
+ * checks so the first argument reaches the Impl intact. */
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t * const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, ipsr \n"
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGenericCommandImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for pcTimerGetName(). Naked function: r0 (xTimer) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n"
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTimerGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for vTimerSetReloadMode(). Naked function: r0 (xTimer) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+ const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n"
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerGetReloadMode(). Naked function: r0 (xTimer) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n"
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for uxTimerGetReloadMode(). Naked function: r0 (xTimer) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n"
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTimerGetReloadModeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerGetPeriod(). Naked function: r0 (xTimer) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n"
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetPeriodImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* MPU wrapper for xTimerGetExpiryTime(). Naked function: r0 (xTimer) is
+ * pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n"
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetExpiryTimeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xEventGroupWaitBits(). Same CONTROL.nPRIV check as the
+ * other wrappers; uses the portSVC_SYSTEM_CALL_ENTER_1 entry like the
+ * other five-argument wrappers in this file (fifth argument presumably on
+ * the caller's stack — TODO confirm against the SVC handler). */
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToWaitFor,
+ const BaseType_t xClearOnExit,
+ const BaseType_t xWaitForAllBits,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n"
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupWaitBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper for xEventGroupClearBits(). Naked function: r0 (xEventGroup)
+ * is pushed/popped around the CONTROL read so it reaches the Impl intact.
+ * CONTROL bit 0 (nPRIV) clear -> privileged caller, tail-call the Impl;
+ * set -> unprivileged caller, enter via SVC portSVC_SYSTEM_CALL_ENTER,
+ * run the Impl with privilege, return via SVC portSVC_SYSTEM_CALL_EXIT. */
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n"
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupClearBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n"
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSetBitsImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+ const EventBits_t uxBitsToSet,
+ const EventBits_t uxBitsToWaitFor,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n"
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xEventGroupSyncImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n"
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxEventGroupGetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+ UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n"
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vEventGroupSetNumberImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+ const void * pvTxData,
+ size_t xDataLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n"
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n"
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n"
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n"
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n"
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n"
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/GCC/ARM_CM85/non_secure/port.c b/portable/GCC/ARM_CM85/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/GCC/ARM_CM85/non_secure/port.c
+++ b/portable/GCC/ARM_CM85/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+    /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+    /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+        /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+    /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/GCC/ARM_CM85/non_secure/portasm.c b/portable/GCC/ARM_CM85/non_secure/portasm.c
index 9f9b2e6..f7ec7d9 100644
--- a/portable/GCC/ARM_CM85/non_secure/portasm.c
+++ b/portable/GCC/ARM_CM85/non_secure/portasm.c
@@ -40,95 +40,120 @@
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xSecureContextConst2: .word xSecureContext \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
- " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r3, [r2] \n"/* Read pxCurrentTCB. */
- " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r3, [r2] \n" /* Read pxCurrentTCB. */
+ " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r2] \n"/* Program RNR = 4. */
- " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #8 \n"/* r4 = 8. */
- " str r4, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #12 \n"/* r4 = 12. */
- " str r4, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- " ldr r5, xSecureContextConst2 \n"
- " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " msr control, r3 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r4 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
- " ldr r4, xSecureContextConst2 \n"
- " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
- " msr psplim, r2 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
- " \n"
+ " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
+ " ldr r4, xSecureContextConst2 \n"
+ " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */
+ " msr psplim, r2 \n" /* Set this task's PSPLIM value. */
+ " movs r1, #2 \n" /* r1 = 2. */
+ " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n" /* Discard everything up to r0. */
+ " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx r3 \n" /* Finally, branch to EXC_RETURN. */
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -236,6 +261,160 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern SecureContext_SaveContext \n"
+ " .extern SecureContext_LoadContext \n"
+ " \n"
+ " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */
+ " \n"
+ " cbz r0, save_ns_context \n" /* No secure context to save. */
+ " save_s_context: \n"
+ " push {r0-r2, lr} \n"
+ " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r0-r2, lr} \n"
+ " \n"
+ " save_ns_context: \n"
+ " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */
+ " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " save_general_regs: \n"
+ " mrs r3, psp \n"
+ " \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " stmia r2!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psp \n" /* r3 = PSP. */
+ " mrs r4, psplim \n" /* r4 = PSPLIM. */
+ " mrs r5, control \n" /* r5 = CONTROL. */
+ " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/
+ " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ " msr psp, r3 \n"
+ " msr psplim, r4 \n"
+ " msr control, r5 \n"
+ " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r4] \n" /* Restore xSecureContext. */
+ " cbz r0, restore_ns_context \n" /* No secure context to restore. */
+ " \n"
+ " restore_s_context: \n"
+ " push {r1-r3, lr} \n"
+ " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r1-r3, lr} \n"
+ " \n"
+ " restore_ns_context: \n"
+ " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */
+ " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " restore_context_done: \n"
+ " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xSecureContextConst: .word xSecureContext \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -260,20 +439,11 @@
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB.*/
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
@@ -284,26 +454,14 @@
" it eq \n"
" vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #16 \n"/* r2 = r2 + 16. */
- " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r3, control \n"/* r3 = CONTROL. */
- " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */
- " subs r2, r2, #16 \n"/* r2 = r2 - 16. */
- " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
- " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
- " str r2, [r1] \n"/* Save the new top of stack in TCB. */
- " adds r2, r2, #12 \n"/* r2 = r2 + 12. */
- " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " subs r2, r2, #12 \n"/* r2 = r2 - 12. */
- " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
+ " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
+ " str r2, [r1] \n"/* Save the new top of stack in TCB. */
+ " adds r2, r2, #12 \n"/* r2 = r2 + 12. */
+ " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
+ " mrs r1, psplim \n"/* r1 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " subs r2, r2, #12 \n"/* r2 = r2 - 12. */
+ " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" \n"
" select_next_task: \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
@@ -318,83 +476,22 @@
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r3] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */
- " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */
- " str r4, [r3] \n"/* Program MAIR0. */
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #4 \n"/* r4 = 4. */
- " str r4, [r3] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #8 \n"/* r4 = 8. */
- " str r4, [r3] \n"/* Program RNR = 8. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */
- " movs r4, #12 \n"/* r4 = 12. */
- " str r4, [r3] \n"/* Program RNR = 12. */
- " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r3] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r3 \n"/* Restore the CONTROL register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #else /* configENABLE_MPU */
- " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " mov lr, r4 \n"/* LR = r4. */
- " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
- " str r0, [r3] \n"/* Restore the task's xSecureContext. */
- " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
- " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
- " push {r2, r4} \n"
- " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- " pop {r2, r4} \n"
- " mov lr, r4 \n"/* LR = r4. */
- " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- " msr psp, r2 \n"/* Remember the new top of stack for the task. */
- " bx lr \n"
- #endif /* configENABLE_MPU */
+ " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
+ " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
+ " mov lr, r4 \n"/* LR = r4. */
+ " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ " str r0, [r3] \n"/* Restore the task's xSecureContext. */
+ " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
+ " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */
+ " push {r2, r4} \n"
+ " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ " pop {r2, r4} \n"
+ " mov lr, r4 \n"/* LR = r4. */
+ " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
+ " msr psp, r2 \n"/* Remember the new top of stack for the task. */
+ " bx lr \n"
" \n"
" restore_ns_context: \n"
" ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */
@@ -409,17 +506,60 @@
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xSecureContextConst: .word xSecureContext \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "beq syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_enter_1 \n"
+ "cmp r2, %2 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_enter_1: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter_1 \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -437,6 +577,8 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
diff --git a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h
+++ b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..6e20434
--- /dev/null
+++ b/portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c
@@ -0,0 +1,2349 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskDelayUntilImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskDelayUntil_Unpriv \n"
+ " MPU_xTaskDelayUntil_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskDelayUntilImpl \n"
+ " MPU_xTaskDelayUntil_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskDelayUntilImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskAbortDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskAbortDelay_Unpriv \n"
+ " MPU_xTaskAbortDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskAbortDelayImpl \n"
+ " MPU_xTaskAbortDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskAbortDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskDelayImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskDelay_Unpriv \n"
+ " MPU_vTaskDelay_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskDelayImpl \n"
+ " MPU_vTaskDelay_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskDelayImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskPriorityGetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskPriorityGet_Unpriv \n"
+ " MPU_uxTaskPriorityGet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskPriorityGetImpl \n"
+ " MPU_uxTaskPriorityGet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskPriorityGetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_eTaskGetStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_eTaskGetState_Unpriv \n"
+ " MPU_eTaskGetState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_eTaskGetStateImpl \n"
+ " MPU_eTaskGetState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_eTaskGetStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskGetInfoImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskGetInfo_Unpriv \n"
+ " MPU_vTaskGetInfo_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskGetInfoImpl \n"
+ " MPU_vTaskGetInfo_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskGetInfoImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetIdleTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n"
+ " MPU_xTaskGetIdleTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetIdleTaskHandleImpl \n"
+ " MPU_xTaskGetIdleTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetIdleTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSuspendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSuspend_Unpriv \n"
+ " MPU_vTaskSuspend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSuspendImpl \n"
+ " MPU_vTaskSuspend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSuspendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskResumeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskResume_Unpriv \n"
+ " MPU_vTaskResume_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskResumeImpl \n"
+ " MPU_vTaskResume_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskResumeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetTickCountImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetTickCount_Unpriv \n"
+ " MPU_xTaskGetTickCount_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetTickCountImpl \n"
+ " MPU_xTaskGetTickCount_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetTickCountImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetNumberOfTasksImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n"
+ " MPU_uxTaskGetNumberOfTasks_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetNumberOfTasksImpl \n"
+ " MPU_uxTaskGetNumberOfTasks_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetNumberOfTasksImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTaskGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcTaskGetName_Unpriv \n"
+ " MPU_pcTaskGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTaskGetNameImpl \n"
+ " MPU_pcTaskGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcTaskGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimeCounterImpl \n"
+ " MPU_ulTaskGetRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetRunTimePercentImpl \n"
+ " MPU_ulTaskGetRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimePercentImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n"
+ " MPU_vTaskSetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetApplicationTaskTagImpl \n"
+ " MPU_vTaskSetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xTaskGetApplicationTaskTag.
+ * r0 (xTask) is pushed/popped around the CONTROL read. Privileged
+ * callers (CONTROL bit 0 clear) tail-call the Impl; unprivileged
+ * callers enter/exit the system call via SVC ENTER/EXIT. */
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetApplicationTaskTagImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n"
+ " MPU_xTaskGetApplicationTaskTag_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetApplicationTaskTagImpl \n"
+ " MPU_xTaskGetApplicationTaskTag_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetApplicationTaskTagImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for vTaskSetThreadLocalStoragePointer.
+ * r0 (first argument) is preserved around the CONTROL read. Privileged
+ * callers (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for pvTaskGetThreadLocalStoragePointer.
+ * r0 (first argument) is preserved around the CONTROL read. Privileged
+ * callers (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for uxTaskGetSystemState.
+ * r0 (first argument) is preserved around the CONTROL read. Privileged
+ * callers (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetSystemStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetSystemState_Unpriv \n"
+ " MPU_uxTaskGetSystemState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetSystemStateImpl \n"
+ " MPU_uxTaskGetSystemState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetSystemStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for uxTaskGetStackHighWaterMark.
+ * r0 (xTask) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMarkImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for uxTaskGetStackHighWaterMark2.
+ * r0 (xTask) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxTaskGetStackHighWaterMark2Impl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xTaskGetCurrentTaskHandle.
+ * r0 is preserved around the CONTROL read. Privileged callers (CONTROL
+ * bit 0 clear) tail-call the Impl; unprivileged callers enter/exit the
+ * system call via SVC ENTER/EXIT. */
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n"
+ " MPU_xTaskGetCurrentTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetCurrentTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xTaskGetSchedulerState.
+ * r0 is preserved around the CONTROL read. Privileged callers (CONTROL
+ * bit 0 clear) tail-call the Impl; unprivileged callers enter/exit the
+ * system call via SVC ENTER/EXIT. */
+BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGetSchedulerStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGetSchedulerState_Unpriv \n"
+ " MPU_xTaskGetSchedulerState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGetSchedulerStateImpl \n"
+ " MPU_xTaskGetSchedulerState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGetSchedulerStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for vTaskSetTimeOutState.
+ * r0 (pxTimeOut) is preserved around the CONTROL read. Privileged
+ * callers (CONTROL bit 0 clear) tail-call the Impl; unprivileged
+ * callers enter/exit the system call via SVC ENTER/EXIT. */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTaskSetTimeOutStateImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTaskSetTimeOutState_Unpriv \n"
+ " MPU_vTaskSetTimeOutState_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTaskSetTimeOutStateImpl \n"
+ " MPU_vTaskSetTimeOutState_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTaskSetTimeOutStateImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xTaskCheckForTimeOut.
+ * r0 (pxTimeOut) is preserved around the CONTROL read. Privileged
+ * callers (CONTROL bit 0 clear) tail-call the Impl; unprivileged
+ * callers enter/exit the system call via SVC ENTER/EXIT. */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+ TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskCheckForTimeOutImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskCheckForTimeOut_Unpriv \n"
+ " MPU_xTaskCheckForTimeOut_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskCheckForTimeOutImpl \n"
+ " MPU_xTaskCheckForTimeOut_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskCheckForTimeOutImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xTaskGenericNotify.
+ * r0 (first argument) is preserved around the CONTROL read. Privileged
+ * callers (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER_1 and exit via _EXIT.
+ * NOTE(review): the _ENTER_1 SVC number is used here, unlike most
+ * wrappers — presumably because this call takes a fifth, stacked
+ * argument; confirm against the port's SVC handler. */
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+ UBaseType_t uxIndexToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotify_Unpriv \n"
+ " MPU_xTaskGenericNotify_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyImpl \n"
+ " MPU_xTaskGenericNotify_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xTaskGenericNotifyWait.
+ * r0 (first argument) is preserved around the CONTROL read. Privileged
+ * callers (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter via SVC portSVC_SYSTEM_CALL_ENTER_1 and exit via _EXIT.
+ * NOTE(review): _ENTER_1 presumably marks calls with a fifth, stacked
+ * argument — confirm against the port's SVC handler. */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t * pulNotificationValue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyWaitImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyWait_Unpriv \n"
+ " MPU_xTaskGenericNotifyWait_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyWaitImpl \n"
+ " MPU_xTaskGenericNotifyWait_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyWaitImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for ulTaskGenericNotifyTake.
+ * r0 (first argument) is preserved around the CONTROL read. Privileged
+ * callers (CONTROL bit 0 clear) tail-call the Impl; unprivileged
+ * callers enter/exit the system call via SVC ENTER/EXIT. */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+ BaseType_t xClearCountOnExit,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyTake_Unpriv \n"
+ " MPU_ulTaskGenericNotifyTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyTakeImpl \n"
+ " MPU_ulTaskGenericNotifyTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xTaskGenericNotifyStateClear.
+ * r0 (xTask) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTaskGenericNotifyStateClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n"
+ " MPU_xTaskGenericNotifyStateClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTaskGenericNotifyStateClearImpl \n"
+ " MPU_xTaskGenericNotifyStateClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTaskGenericNotifyStateClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for ulTaskGenericNotifyValueClear.
+ * r0 (xTask) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+ UBaseType_t uxIndexToClear,
+ uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n"
+ " MPU_ulTaskGenericNotifyValueClear_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_ulTaskGenericNotifyValueClearImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xQueueGenericSend.
+ * r0 (xQueue) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+ const void * const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGenericSendImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGenericSend_Unpriv \n"
+ " MPU_xQueueGenericSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGenericSendImpl \n"
+ " MPU_xQueueGenericSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGenericSendImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for uxQueueMessagesWaiting.
+ * r0 (xQueue) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueMessagesWaitingImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueMessagesWaiting_Unpriv \n"
+ " MPU_uxQueueMessagesWaiting_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueMessagesWaitingImpl \n"
+ " MPU_uxQueueMessagesWaiting_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueMessagesWaitingImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for uxQueueSpacesAvailable.
+ * r0 (xQueue) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxQueueSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_uxQueueSpacesAvailable_Unpriv \n"
+ " MPU_uxQueueSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxQueueSpacesAvailableImpl \n"
+ " MPU_uxQueueSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_uxQueueSpacesAvailableImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xQueueReceive.
+ * r0 (xQueue) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueReceiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueReceive_Unpriv \n"
+ " MPU_xQueueReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueReceiveImpl \n"
+ " MPU_xQueueReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueReceiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xQueuePeek.
+ * r0 (xQueue) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueuePeekImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueuePeek_Unpriv \n"
+ " MPU_xQueuePeek_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueuePeekImpl \n"
+ " MPU_xQueuePeek_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueuePeekImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xQueueSemaphoreTake.
+ * r0 (xQueue) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSemaphoreTakeImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSemaphoreTake_Unpriv \n"
+ " MPU_xQueueSemaphoreTake_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSemaphoreTakeImpl \n"
+ " MPU_xQueueSemaphoreTake_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSemaphoreTakeImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xQueueGetMutexHolder.
+ * r0 (xSemaphore) is preserved around the CONTROL read. Privileged
+ * callers (CONTROL bit 0 clear) tail-call the Impl; unprivileged
+ * callers enter/exit the system call via SVC ENTER/EXIT. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGetMutexHolderImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGetMutexHolder_Unpriv \n"
+ " MPU_xQueueGetMutexHolder_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGetMutexHolderImpl \n"
+ " MPU_xQueueGetMutexHolder_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGetMutexHolderImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xQueueTakeMutexRecursive.
+ * r0 (xMutex) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+ TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueTakeMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueTakeMutexRecursive_Unpriv \n"
+ " MPU_xQueueTakeMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueTakeMutexRecursiveImpl \n"
+ " MPU_xQueueTakeMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueTakeMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xQueueGiveMutexRecursive.
+ * r0 (pxMutex) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueGiveMutexRecursiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueGiveMutexRecursive_Unpriv \n"
+ " MPU_xQueueGiveMutexRecursive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueGiveMutexRecursiveImpl \n"
+ " MPU_xQueueGiveMutexRecursive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueGiveMutexRecursiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xQueueSelectFromSet.
+ * r0 (xQueueSet) is preserved around the CONTROL read. Privileged
+ * callers (CONTROL bit 0 clear) tail-call the Impl; unprivileged
+ * callers enter/exit the system call via SVC ENTER/EXIT. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueSelectFromSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueSelectFromSet_Unpriv \n"
+ " MPU_xQueueSelectFromSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueSelectFromSetImpl \n"
+ " MPU_xQueueSelectFromSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueSelectFromSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xQueueAddToSet.
+ * r0 (xQueueOrSemaphore) is preserved around the CONTROL read.
+ * Privileged callers (CONTROL bit 0 clear) tail-call the Impl;
+ * unprivileged callers enter/exit the system call via SVC ENTER/EXIT. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xQueueAddToSetImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xQueueAddToSet_Unpriv \n"
+ " MPU_xQueueAddToSet_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xQueueAddToSetImpl \n"
+ " MPU_xQueueAddToSet_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xQueueAddToSetImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for vQueueAddToRegistry.
+ * r0 (xQueue) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+ const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueAddToRegistryImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueAddToRegistry_Unpriv \n"
+ " MPU_vQueueAddToRegistry_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueAddToRegistryImpl \n"
+ " MPU_vQueueAddToRegistry_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueAddToRegistryImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for vQueueUnregisterQueue.
+ * r0 (xQueue) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vQueueUnregisterQueueImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vQueueUnregisterQueue_Unpriv \n"
+ " MPU_vQueueUnregisterQueue_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vQueueUnregisterQueueImpl \n"
+ " MPU_vQueueUnregisterQueue_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vQueueUnregisterQueueImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for pcQueueGetName.
+ * r0 (xQueue) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcQueueGetNameImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pcQueueGetName_Unpriv \n"
+ " MPU_pcQueueGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcQueueGetNameImpl \n"
+ " MPU_pcQueueGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pcQueueGetNameImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for pvTimerGetTimerID.
+ * r0 (xTimer) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pvTimerGetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_pvTimerGetTimerID_Unpriv \n"
+ " MPU_pvTimerGetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pvTimerGetTimerIDImpl \n"
+ " MPU_pvTimerGetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_pvTimerGetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for vTimerSetTimerID.
+ * r0 (xTimer) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+ void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetTimerIDImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetTimerID_Unpriv \n"
+ " MPU_vTimerSetTimerID_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetTimerIDImpl \n"
+ " MPU_vTimerSetTimerID_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_vTimerSetTimerIDImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xTimerIsTimerActive.
+ * r0 (xTimer) is preserved around the CONTROL read. Privileged callers
+ * (CONTROL bit 0 clear) tail-call the Impl; unprivileged callers
+ * enter/exit the system call via SVC ENTER/EXIT. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerIsTimerActiveImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerIsTimerActive_Unpriv \n"
+ " MPU_xTimerIsTimerActive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerIsTimerActiveImpl \n"
+ " MPU_xTimerIsTimerActive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerIsTimerActiveImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+/* MPU wrapper (naked, asm-only body) for xTimerGetTimerDaemonTaskHandle.
+ * r0 is preserved around the CONTROL read. Privileged callers (CONTROL
+ * bit 0 clear) tail-call the Impl; unprivileged callers enter/exit the
+ * system call via SVC ENTER/EXIT. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " \n"
+ " push {r0} \n"
+ " mrs r0, control \n"
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n"
+ " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n"
+ " svc %1 \n"
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                     const BaseType_t xCommandID,
+                                     const TickType_t xOptionalValue,
+                                     BaseType_t * const pxHigherPriorityTaskWoken,
+                                     const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                     const BaseType_t xCommandID,
+                                     const TickType_t xOptionalValue,
+                                     BaseType_t * const pxHigherPriorityTaskWoken,
+                                     const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: calls made from an ISR or from a privileged task branch straight to the Impl; unprivileged tasks go through an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGenericCommandImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, ipsr \n" /* Non-zero IPSR => executing in an ISR: take the privileged path. */
+ " cmp r0, #0 \n"
+ " bne MPU_xTimerGenericCommand_Priv \n"
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) clear => privileged task. */
+ " tst r0, #1 \n"
+ " beq MPU_xTimerGenericCommand_Priv \n"
+ " MPU_xTimerGenericCommand_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* ENTER_1: system call with 5 arguments (the 5th is passed on the stack). */
+ " bl MPU_xTimerGenericCommandImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " MPU_xTimerGenericCommand_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGenericCommandImpl \n" /* Privileged/ISR: tail-call; Impl returns directly to the caller. */
+ " \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_pcTimerGetNameImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_pcTimerGetName_Unpriv \n"
+ " MPU_pcTimerGetName_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_pcTimerGetNameImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_pcTimerGetName_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_pcTimerGetNameImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                              const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                              const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vTimerSetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_vTimerSetReloadMode_Unpriv \n"
+ " MPU_vTimerSetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vTimerSetReloadModeImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_vTimerSetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_vTimerSetReloadModeImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetReloadMode_Unpriv \n"
+ " MPU_xTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetReloadModeImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xTimerGetReloadModeImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxTimerGetReloadModeImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_uxTimerGetReloadMode_Unpriv \n"
+ " MPU_uxTimerGetReloadMode_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxTimerGetReloadModeImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_uxTimerGetReloadMode_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_uxTimerGetReloadModeImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetPeriodImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetPeriod_Unpriv \n"
+ " MPU_xTimerGetPeriod_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetPeriodImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xTimerGetPeriod_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xTimerGetPeriodImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xTimerGetExpiryTimeImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xTimerGetExpiryTime_Unpriv \n"
+ " MPU_xTimerGetExpiryTime_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xTimerGetExpiryTimeImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xTimerGetExpiryTime_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xTimerGetExpiryTimeImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     const BaseType_t xClearOnExit,
+                                     const BaseType_t xWaitForAllBits,
+                                     TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     const BaseType_t xClearOnExit,
+                                     const BaseType_t xWaitForAllBits,
+                                     TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupWaitBitsImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupWaitBits_Unpriv \n"
+ " MPU_xEventGroupWaitBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupWaitBitsImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xEventGroupWaitBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* ENTER_1: system call with 5 arguments (the 5th is passed on the stack). */
+ " bl MPU_xEventGroupWaitBitsImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                      const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                      const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupClearBitsImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupClearBits_Unpriv \n"
+ " MPU_xEventGroupClearBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupClearBitsImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xEventGroupClearBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xEventGroupClearBitsImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                    const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                    const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSetBitsImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSetBits_Unpriv \n"
+ " MPU_xEventGroupSetBits_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSetBitsImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xEventGroupSetBits_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xEventGroupSetBitsImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                 const EventBits_t uxBitsToSet,
+                                 const EventBits_t uxBitsToWaitFor,
+                                 TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                 const EventBits_t uxBitsToSet,
+                                 const EventBits_t uxBitsToWaitFor,
+                                 TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xEventGroupSyncImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xEventGroupSync_Unpriv \n"
+ " MPU_xEventGroupSync_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xEventGroupSyncImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xEventGroupSync_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xEventGroupSyncImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_uxEventGroupGetNumberImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_uxEventGroupGetNumber_Unpriv \n"
+ " MPU_uxEventGroupGetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_uxEventGroupGetNumberImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_uxEventGroupGetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_uxEventGroupGetNumberImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+                               UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+                               UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_vEventGroupSetNumberImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_vEventGroupSetNumber_Unpriv \n"
+ " MPU_vEventGroupSetNumber_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_vEventGroupSetNumberImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_vEventGroupSetNumber_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_vEventGroupSetNumberImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                              const void * pvTxData,
+                              size_t xDataLengthBytes,
+                              TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                              const void * pvTxData,
+                              size_t xDataLengthBytes,
+                              TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSendImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSend_Unpriv \n"
+ " MPU_xStreamBufferSend_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSendImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xStreamBufferSend_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xStreamBufferSendImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                                 void * pvRxData,
+                                 size_t xBufferLengthBytes,
+                                 TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                                 void * pvRxData,
+                                 size_t xBufferLengthBytes,
+                                 TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferReceiveImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferReceive_Unpriv \n"
+ " MPU_xStreamBufferReceive_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferReceiveImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xStreamBufferReceive_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xStreamBufferReceiveImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsFullImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsFull_Unpriv \n"
+ " MPU_xStreamBufferIsFull_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsFullImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xStreamBufferIsFull_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xStreamBufferIsFullImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferIsEmptyImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferIsEmpty_Unpriv \n"
+ " MPU_xStreamBufferIsEmpty_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferIsEmptyImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xStreamBufferIsEmpty_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xStreamBufferIsEmptyImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSpacesAvailableImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n"
+ " MPU_xStreamBufferSpacesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSpacesAvailableImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xStreamBufferSpacesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xStreamBufferSpacesAvailableImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferBytesAvailableImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferBytesAvailable_Unpriv \n"
+ " MPU_xStreamBufferBytesAvailable_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferBytesAvailableImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xStreamBufferBytesAvailable_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xStreamBufferBytesAvailableImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+                                             size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+                                             size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n"
+ " MPU_xStreamBufferSetTriggerLevel_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferSetTriggerLevelImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xStreamBufferSetTriggerLevelImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
+{ /* Naked wrapper: privileged callers branch straight to the Impl; unprivileged callers invoke it via an SVC system call. */
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " \n"
+ " push {r0} \n" /* Preserve r0 (first argument). */
+ " mrs r0, control \n" /* CONTROL bit 0 (nPRIV) set => unprivileged caller. */
+ " tst r0, #1 \n"
+ " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n"
+ " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n"
+ " pop {r0} \n"
+ " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" /* Privileged: tail-call; Impl returns directly to the caller. */
+ " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n"
+ " pop {r0} \n"
+ " svc %0 \n" /* Enter system call: switch to the privileged system-call stack. */
+ " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n"
+ " svc %1 \n" /* Exit system call: restore the task stack and privilege level. */
+ " bx lr \n"
+ " \n"
+ : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
+ );
+}
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c
+++ b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ { /* Map the access-permission field of an MPU RBAR value onto tskMPU_*_PERMISSION flags. */
+ uint32_t ulAccessPermissions = 0; /* Default: no permissions for unrecognised encodings. */
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION; /* Read-only region => read access only. */
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); /* Read/write region. */
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c
index a78529d..504b6bf 100644
--- a/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c
+++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c
@@ -40,6 +40,88 @@
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+#if ( configENABLE_MPU == 1 )
+
+void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " program_mpu_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context_first_task: \n"
+ " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs_first_task: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs_first_task: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ " \n"
+ " restore_context_done_first_task: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst2: .word pxCurrentTCB \n"
+ " xMPUCTRLConst2: .word 0xe000ed94 \n"
+ " xMAIR0Const2: .word 0xe000edc0 \n"
+ " xRNRConst2: .word 0xe000ed98 \n"
+ " xRBARConst2: .word 0xe000ed9c \n"
+ );
+}
+
+#else /* configENABLE_MPU */
+
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -50,80 +132,23 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */
- " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r3, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #4 \n"/* r3 = 4. */
- " str r3, [r2] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #8 \n"/* r3 = 8. */
- " str r3, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #12 \n"/* r3 = 12. */
- " str r3, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " msr control, r2 \n"/* Set this task's CONTROL value. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r3 \n"/* Finally, branch to EXC_RETURN. */
- #else /* configENABLE_MPU */
- " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
- " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
- " movs r1, #2 \n"/* r1 = 2. */
- " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
- " adds r0, #32 \n"/* Discard everything up to r0. */
- " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
- " isb \n"
- " mov r0, #0 \n"
- " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
- " bx r2 \n"/* Finally, branch to EXC_RETURN. */
- #endif /* configENABLE_MPU */
+ " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
+ " msr psplim, r1 \n"/* Set this task's PSPLIM value. */
+ " movs r1, #2 \n"/* r1 = 2. */
+ " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
+ " adds r0, #32 \n"/* Discard everything up to r0. */
+ " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
+ " isb \n"
+ " mov r0, #0 \n"
+ " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
+ " bx r2 \n"/* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst2: .word 0xe000ed94 \n"
- "xMAIR0Const2: .word 0xe000edc0 \n"
- "xRNRConst2: .word 0xe000ed98 \n"
- "xRBARConst2: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
@@ -231,6 +256,129 @@
}
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ " .syntax unified \n"
+ " \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */
+ " mrs r2, psp \n" /* r2 = PSP. */
+ " \n"
+ " save_general_regs: \n"
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */
+ " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */
+ " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */
+ " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " stmia r1!, {r4-r11} \n" /* Store r4-r11. */
+ " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */
+ " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */
+ " \n"
+ " save_special_regs: \n"
+ " mrs r3, psplim \n" /* r3 = PSPLIM. */
+ " mrs r4, control \n" /* r4 = CONTROL. */
+ " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */
+ " \n"
+ " select_next_task: \n"
+ " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
+ " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ " dsb \n"
+ " isb \n"
+ " bl vTaskSwitchContext \n"
+ " mov r0, #0 \n" /* r0 = 0. */
+ " msr basepri, r0 \n" /* Enable interrupts. */
+ " \n"
+ " program_mpu: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */
+ " \n"
+ " dmb \n" /* Complete outstanding transfers before disabling MPU. */
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ " str r2, [r1] \n" /* Disable MPU. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */
+ " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ " str r1, [r2] \n" /* Program MAIR0. */
+ " \n"
+ " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */
+ " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
+ " \n"
+ " movs r3, #4 \n" /* r3 = 4. */
+ " str r3, [r1] \n" /* Program RNR = 4. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " \n"
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ " movs r3, #8 \n" /* r3 = 8. */
+ " str r3, [r1] \n" /* Program RNR = 8. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ " movs r3, #12 \n" /* r3 = 12. */
+ " str r3, [r1] \n" /* Program RNR = 12. */
+ " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+ " \n"
+ " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */
+ " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ " str r2, [r1] \n" /* Enable MPU. */
+ " dsb \n" /* Force memory writes before continuing. */
+ " \n"
+ " restore_context: \n"
+ " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/
+ " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */
+ " \n"
+ " restore_special_regs: \n"
+ " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ " msr psp, r2 \n"
+ " msr psplim, r3 \n"
+ " msr control, r4 \n"
+ " \n"
+ " restore_general_regs: \n"
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */
+ " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */
+ " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ " tst lr, #0x10 \n"
+ " ittt eq \n"
+ " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */
+ " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */
+ " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+ " \n"
+ " restore_context_done: \n"
+ " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */
+ " bx lr \n"
+ " \n"
+ " .align 4 \n"
+ " pxCurrentTCBConst: .word pxCurrentTCB \n"
+ " xMPUCTRLConst: .word 0xe000ed94 \n"
+ " xMAIR0Const: .word 0xe000edc0 \n"
+ " xRNRConst: .word 0xe000ed98 \n"
+ " xRBARConst: .word 0xe000ed9c \n"
+ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
+ );
+}
+
+#else /* configENABLE_MPU */
+
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -238,21 +386,16 @@
" .syntax unified \n"
" \n"
" mrs r0, psp \n"/* Read PSP in r0. */
+ " \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- " mrs r1, psplim \n"/* r1 = PSPLIM. */
- " mrs r2, control \n"/* r2 = CONTROL. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
- #else /* configENABLE_MPU */
- " mrs r2, psplim \n"/* r2 = PSPLIM. */
- " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
- " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
- #endif /* configENABLE_MPU */
+ " \n"
+ " mrs r2, psplim \n"/* r2 = PSPLIM. */
+ " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
+ " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
@@ -270,52 +413,7 @@
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
- #if ( configENABLE_MPU == 1 )
- " dmb \n"/* Complete outstanding transfers before disabling MPU. */
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- " str r4, [r2] \n"/* Disable MPU. */
- " \n"
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */
- " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */
- " str r3, [r2] \n"/* Program MAIR0. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #4 \n"/* r3 = 4. */
- " str r3, [r2] \n"/* Program RNR = 4. */
- " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " \n"
- #if ( configTOTAL_MPU_REGIONS == 16 )
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #8 \n"/* r3 = 8. */
- " str r3, [r2] \n"/* Program RNR = 8. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */
- " movs r3, #12 \n"/* r3 = 12. */
- " str r3, [r2] \n"/* Program RNR = 12. */
- " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */
- " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */
- " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */
- #endif /* configTOTAL_MPU_REGIONS == 16 */
- " \n"
- " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */
- " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- " str r4, [r2] \n"/* Enable MPU. */
- " dsb \n"/* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
- " \n"
- #if ( configENABLE_MPU == 1 )
- " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
- #else /* configENABLE_MPU */
- " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
- #endif /* configENABLE_MPU */
+ " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
@@ -323,28 +421,66 @@
" vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
- #if ( configENABLE_MPU == 1 )
- " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
- " msr control, r2 \n"/* Restore the CONTROL register value for the task. */
- #else /* configENABLE_MPU */
- " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
- #endif /* configENABLE_MPU */
+ " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
- #if ( configENABLE_MPU == 1 )
- "xMPUCTRLConst: .word 0xe000ed94 \n"
- "xMAIR0Const: .word 0xe000edc0 \n"
- "xRNRConst: .word 0xe000ed98 \n"
- "xRBARConst: .word 0xe000ed9c \n"
- #endif /* configENABLE_MPU */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ ".extern vPortSVCHandler_C \n"
+ ".extern vSystemCallEnter \n"
+ ".extern vSystemCallEnter_1 \n"
+ ".extern vSystemCallExit \n"
+ " \n"
+ "tst lr, #4 \n"
+ "ite eq \n"
+ "mrseq r0, msp \n"
+ "mrsne r0, psp \n"
+ " \n"
+ "ldr r1, [r0, #24] \n"
+ "ldrb r2, [r1, #-2] \n"
+ "cmp r2, %0 \n"
+ "beq syscall_enter \n"
+ "cmp r2, %1 \n"
+ "beq syscall_enter_1 \n"
+ "cmp r2, %2 \n"
+ "beq syscall_exit \n"
+ "b vPortSVCHandler_C \n"
+ " \n"
+ "syscall_enter: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter \n"
+ " \n"
+ "syscall_enter_1: \n"
+ " mov r1, lr \n"
+ " b vSystemCallEnter_1 \n"
+ " \n"
+ "syscall_exit: \n"
+ " mov r1, lr \n"
+ " b vSystemCallExit \n"
+ " \n"
+ : /* No outputs. */
+ :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
+ : "r0", "r1", "r2", "memory"
+ );
+}
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
@@ -362,4 +498,6 @@
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h
+++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..867642b
--- /dev/null
+++ b/portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1623 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0, r1}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0, r1}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0, r1}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0, r1}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0, r1}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0, r1}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0, r1}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0, r1}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0, r1}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0, r1}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0, r1}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0, r1}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0, r1}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0, r1}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0, r1}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0, r1}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0, r1}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0, r1}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0, r1}
+ /* This function can also be called from an ISR; check IPSR and take the
+ * privileged path when running in handler mode. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv /* Non-zero IPSR ==> executing in an ISR - take the privileged path. */
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1 /* CONTROL bit 0 (nPRIV) clear ==> already privileged. */
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1 /* ENTER_1: this system call takes 5 parameters (5th is on the stack). */
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0, r1}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0, r1}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/IAR/ARM_CM23/non_secure/port.c b/portable/IAR/ARM_CM23/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/IAR/ARM_CM23/non_secure/port.c
+++ b/portable/IAR/ARM_CM23/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? Both
+ * operands are fully parenthesized so compound argument expressions
+ * (e.g. flag1 | flag2) cannot mis-parse against the '==' operator. */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/IAR/ARM_CM23/non_secure/portasm.s b/portable/IAR/ARM_CM23/non_secure/portasm.s
index fffed8d..648ae00 100644
--- a/portable/IAR/ARM_CM23/non_secure/portasm.s
+++ b/portable/IAR/ARM_CM23/non_secure/portasm.s
@@ -33,12 +33,21 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN xSecureContext
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
EXTERN SecureContext_SaveContext
EXTERN SecureContext_LoadContext
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -98,65 +107,99 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ subs r2, #20
+ ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */
+ subs r2, #20
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ mov lr, r6
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+
+ restore_general_regs_first_task:
+ subs r2, #32
+ ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r3!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */
+ ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */
+ stmia r3!, {r4-r7} /* Copy rest half of the hardware saved context on the task stack. */
+ subs r2, #48
+ ldmia r2!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r2, #32
+ ldmia r2!, {r4-r7} /* Restore r4-r7. */
+ subs r2, #16
+
+ restore_context_done_first_task:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r3, [r2] /* Read pxCurrentTCB. */
ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- movs r5, #1 /* r5 = 1. */
- bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- movs r5, #4 /* r5 = 4. */
- str r5, [r2] /* Program RNR = 4. */
- ldmia r3!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */
- ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */
- stmia r4!, {r6,r7} /* Write first set of RBAR/RLAR registers. */
- movs r5, #5 /* r5 = 5. */
- str r5, [r2] /* Program RNR = 5. */
- ldmia r3!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */
- ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */
- stmia r4!, {r6,r7} /* Write second set of RBAR/RLAR registers. */
- movs r5, #6 /* r5 = 6. */
- str r5, [r2] /* Program RNR = 6. */
- ldmia r3!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */
- ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */
- stmia r4!, {r6,r7} /* Write third set of RBAR/RLAR registers. */
- movs r5, #7 /* r5 = 7. */
- str r5, [r2] /* Program RNR = 7. */
- ldmia r3!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */
- ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */
- stmia r4!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- movs r5, #1 /* r5 = 1. */
- orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- ldr r5, =xSecureContext
- str r1, [r5] /* Set xSecureContext to this task's value for the same. */
- msr psplim, r2 /* Set this task's PSPLIM value. */
- msr control, r3 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- bx r4 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
ldr r4, =xSecureContext
str r1, [r4] /* Set xSecureContext to this task's value for the same. */
@@ -167,6 +210,7 @@
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r3 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -199,6 +243,149 @@
msr PRIMASK, r0
bx lr
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/
+ ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */
+
+ cbz r0, save_ns_context /* No secure context to save. */
+ save_s_context:
+ push {r0-r2, lr}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r3} /* LR is now in r3. */
+ mov lr, r3 /* Restore LR. */
+
+ save_ns_context:
+ mov r3, lr /* r3 = LR (EXC_RETURN). */
+ lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ save_general_regs:
+ mrs r3, psp
+ stmia r2!, {r4-r7} /* Store r4-r7. */
+ mov r4, r8 /* r4 = r8. */
+ mov r5, r9 /* r5 = r9. */
+ mov r6, r10 /* r6 = r10. */
+ mov r7, r11 /* r7 = r11. */
+ stmia r2!, {r4-r7} /* Store r8-r11. */
+ ldmia r3!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */
+ stmia r2!, {r4-r7} /* Store the hardware saved context. */
+ ldmia r3!, {r4-r7} /* Copy rest half of the hardware saved context into r4-r7. */
+ stmia r2!, {r4-r7} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psp /* r3 = PSP. */
+ mrs r4, psplim /* r4 = PSPLIM. */
+ mrs r5, control /* r5 = CONTROL. */
+ mov r6, lr /* r6 = LR. */
+ stmia r2!, {r0, r3-r6} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ cpsid i
+ bl vTaskSwitchContext
+ cpsie i
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ subs r2, #20
+ ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */
+ subs r2, #20
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ mov lr, r6
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+ cbz r0, restore_ns_context /* No secure context to restore. */
+
+ restore_s_context:
+ push {r1-r3, lr}
+ bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r1-r4} /* LR is now in r4. */
+ mov lr, r4
+
+ restore_ns_context:
+ mov r0, lr /* r0 = LR (EXC_RETURN). */
+ lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ restore_general_regs:
+ subs r2, #32
+ ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r3!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */
+ ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */
+ stmia r3!, {r4-r7} /* Copy rest half of the hardware saved context on the task stack. */
+ subs r2, #48
+ ldmia r2!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r2, #32
+ ldmia r2!, {r4-r7} /* Restore r4-r7. */
+ subs r2, #16
+
+ restore_context_done:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
PendSV_Handler:
ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
@@ -216,41 +403,18 @@
bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r3] /* Read pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
-#else /* configENABLE_MPU */
+
subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
mrs r1, psplim /* r1 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
-#endif /* configENABLE_MPU */
+
b select_next_task
save_ns_context:
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r3] /* Read pxCurrentTCB. */
- #if ( configENABLE_MPU == 1 )
- subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- adds r2, r2, #16 /* r2 = r2 + 16. */
- stmia r2!, {r4-r7} /* Store the low registers that are not saved automatically. */
- mov r4, r8 /* r4 = r8. */
- mov r5, r9 /* r5 = r9. */
- mov r6, r10 /* r6 = r10. */
- mov r7, r11 /* r7 = r11. */
- stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- subs r2, r2, #48 /* r2 = r2 - 48. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
mrs r1, psplim /* r1 = PSPLIM. */
@@ -261,7 +425,6 @@
mov r6, r10 /* r6 = r10. */
mov r7, r11 /* r7 = r11. */
stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */
- #endif /* configENABLE_MPU */
select_next_task:
cpsid i
@@ -272,68 +435,6 @@
ldr r1, [r3] /* Read pxCurrentTCB. */
ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
- #if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- movs r5, #1 /* r5 = 1. */
- bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
- str r4, [r3] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r3] /* Program MAIR0. */
- ldr r4, =0xe000ed98 /* r4 = 0xe000ed98 [Location of RNR]. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- movs r5, #4 /* r5 = 4. */
- str r5, [r4] /* Program RNR = 4. */
- ldmia r1!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r6,r7} /* Write first set of RBAR/RLAR registers. */
- movs r5, #5 /* r5 = 5. */
- str r5, [r4] /* Program RNR = 5. */
- ldmia r1!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r6,r7} /* Write second set of RBAR/RLAR registers. */
- movs r5, #6 /* r5 = 6. */
- str r5, [r4] /* Program RNR = 6. */
- ldmia r1!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r6,r7} /* Write third set of RBAR/RLAR registers. */
- movs r5, #7 /* r5 = 7. */
- str r5, [r4] /* Program RNR = 7. */
- ldmia r1!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */
-
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- movs r5, #1 /* r5 = 1. */
- orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
- str r4, [r3] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
-
- #if ( configENABLE_MPU == 1 )
- ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r3 /* Restore the CONTROL register value for the task. */
- mov lr, r4 /* LR = r4. */
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- str r0, [r3] /* Restore the task's xSecureContext. */
- cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- push {r2, r4}
- bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r2, r4}
- mov lr, r4 /* LR = r4. */
- lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- msr psp, r2 /* Remember the new top of stack for the task. */
- bx lr
- #else /* configENABLE_MPU */
ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
msr psplim, r1 /* Restore the PSPLIM register value for the task. */
mov lr, r4 /* LR = r4. */
@@ -350,7 +451,6 @@
bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
msr psp, r2 /* Remember the new top of stack for the task. */
bx lr
- #endif /* configENABLE_MPU */
restore_ns_context:
adds r2, r2, #16 /* Move to the high registers. */
@@ -363,8 +463,45 @@
subs r2, r2, #32 /* Go back to the low registers. */
ldmia r2!, {r4-r7} /* Restore the low registers that are not automatically restored. */
bx lr
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ movs r0, #4
+ mov r1, lr
+ tst r0, r1
+ beq stack_on_msp
+ stack_on_psp:
+ mrs r0, psp
+ b route_svc
+ stack_on_msp:
+ mrs r0, msp
+ b route_svc
+
+ route_svc:
+ ldr r2, [r0, #24]
+ subs r2, #2
+ ldrb r3, [r2, #0]
+ cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq system_call_enter
+ cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq system_call_enter_1
+ cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq system_call_exit
+ b vPortSVCHandler_C
+
+ system_call_enter:
+ b vSystemCallEnter
+ system_call_enter_1:
+ b vSystemCallEnter_1
+ system_call_exit:
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
movs r0, #4
mov r1, lr
@@ -375,6 +512,8 @@
stacking_used_msp:
mrs r0, msp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
vPortFreeSecureContext:
diff --git a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h
+++ b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..867642b
--- /dev/null
+++ b/portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1623 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0, r1}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0, r1}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0, r1}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0, r1}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0, r1}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0, r1}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0, r1}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0, r1}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0, r1}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0, r1}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0, r1}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0, r1}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0, r1}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0, r1}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0, r1}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0, r1}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0, r1}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0, r1}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0, r1}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0, r1}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0, r1}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0, r1}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0, r1}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0, r1}
+ /* This function can be called from ISR also and therefore, we need a check
+ * to take privileged path, if called from ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0, r1}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0, r1}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0, r1}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0, r1}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0, r1}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0, r1}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0, r1}
+ mrs r0, control
+ movs r1, #1
+ tst r0, r1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0, r1}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0, r1}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c
+++ b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s
index 62bd387..8f77c4d 100644
--- a/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s
+++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s
@@ -32,9 +32,18 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -88,63 +97,97 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ subs r1, #16
+ ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ subs r1, #16
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+ mov lr, r5
+
+ restore_general_regs_first_task:
+ subs r1, #32
+ ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */
+ ldmia r1!, {r4-r7} /* r4-r7 contain the other half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy the other half of the hardware saved context on the task stack. */
+ subs r1, #48
+ ldmia r1!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r1, #32
+ ldmia r1!, {r4-r7} /* Restore r4-r7. */
+ subs r1, #16
+
+ restore_context_done_first_task:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- str r3, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r2] /* Program RNR = 4. */
- ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */
- movs r4, #5 /* r4 = 5. */
- str r4, [r2] /* Program RNR = 5. */
- ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */
- movs r4, #6 /* r4 = 6. */
- str r4, [r2] /* Program RNR = 6. */
- ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */
- movs r4, #7 /* r4 = 7. */
- str r4, [r2] /* Program RNR = 7. */
- ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
- str r3, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- msr psplim, r1 /* Set this task's PSPLIM value. */
- msr control, r2 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- bx r3 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
@@ -153,6 +196,7 @@
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r2 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -187,23 +231,127 @@
bx lr
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */
+ mrs r2, psp /* r2 = PSP. */
+
+ save_general_regs:
+ stmia r1!, {r4-r7} /* Store r4-r7. */
+ mov r4, r8 /* r4 = r8. */
+ mov r5, r9 /* r5 = r9. */
+ mov r6, r10 /* r6 = r10. */
+ mov r7, r11 /* r7 = r11. */
+ stmia r1!, {r4-r7} /* Store r8-r11. */
+ ldmia r2!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */
+ stmia r1!, {r4-r7} /* Store the hardware saved context. */
+ ldmia r2!, {r4-r7} /* Copy the other half of the hardware saved context into r4-r7. */
+ stmia r1!, {r4-r7} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r2, psp /* r2 = PSP. */
+ mrs r3, psplim /* r3 = PSPLIM. */
+ mrs r4, control /* r4 = CONTROL. */
+ mov r5, lr /* r5 = LR. */
+ stmia r1!, {r2-r5} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ cpsid i
+ bl vTaskSwitchContext
+ cpsie i
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */
+ movs r3, #5 /* r3 = 5. */
+ str r3, [r1] /* Program RNR = 5. */
+ ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */
+ movs r3, #6 /* r3 = 6. */
+ str r3, [r1] /* Program RNR = 6. */
+ ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */
+ movs r3, #7 /* r3 = 7. */
+ str r3, [r1] /* Program RNR = 7. */
+ ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+ stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ movs r3, #1 /* r3 = 1. */
+ orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ subs r1, #16
+ ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */
+ subs r1, #16
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+ mov lr, r5
+
+ restore_general_regs:
+ subs r1, #32
+ ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */
+ ldmia r1!, {r4-r7} /* r4-r7 contain the other half of the hardware saved context. */
+ stmia r2!, {r4-r7} /* Copy the other half of the hardware saved context on the task stack. */
+ subs r1, #48
+ ldmia r1!, {r4-r7} /* Restore r8-r11. */
+ mov r8, r4 /* r8 = r4. */
+ mov r9, r5 /* r9 = r5. */
+ mov r10, r6 /* r10 = r6. */
+ mov r11, r7 /* r11 = r7. */
+ subs r1, #32
+ ldmia r1!, {r4-r7} /* Restore r4-r7. */
+ subs r1, #16
+
+ restore_context_done:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
PendSV_Handler:
mrs r0, psp /* Read PSP in r0. */
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- subs r0, r0, #44 /* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- str r0, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r2, control /* r2 = CONTROL. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmia r0!, {r1-r7} /* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */
- mov r4, r8 /* r4 = r8. */
- mov r5, r9 /* r5 = r9. */
- mov r6, r10 /* r6 = r10. */
- mov r7, r11 /* r7 = r11. */
- stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */
-#else /* configENABLE_MPU */
+
subs r0, r0, #40 /* Make space for PSPLIM, LR and the remaining registers on the stack. */
str r0, [r1] /* Save the new top of stack in TCB. */
mrs r2, psplim /* r2 = PSPLIM. */
@@ -214,7 +362,6 @@
mov r6, r10 /* r6 = r10. */
mov r7, r11 /* r7 = r11. */
stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */
-#endif /* configENABLE_MPU */
cpsid i
bl vTaskSwitchContext
@@ -224,63 +371,6 @@
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
- str r3, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r2] /* Program RNR = 4. */
- ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */
- movs r4, #5 /* r4 = 5. */
- str r4, [r2] /* Program RNR = 5. */
- ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */
- movs r4, #6 /* r4 = 6. */
- str r4, [r2] /* Program RNR = 6. */
- ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */
- movs r4, #7 /* r4 = 7. */
- str r4, [r2] /* Program RNR = 7. */
- ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- movs r4, #1 /* r4 = 1. */
- orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
- str r3, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- adds r0, r0, #28 /* Move to the high registers. */
- ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */
- mov r8, r4 /* r8 = r4. */
- mov r9, r5 /* r9 = r5. */
- mov r10, r6 /* r10 = r6. */
- mov r11, r7 /* r11 = r7. */
- msr psp, r0 /* Remember the new top of stack for the task. */
- subs r0, r0, #44 /* Move to the starting of the saved context. */
- ldmia r0!, {r1-r7} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r2 /* Restore the CONTROL register value for the task. */
- bx r3
-#else /* configENABLE_MPU */
adds r0, r0, #24 /* Move to the high registers. */
ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */
mov r8, r4 /* r8 = r4. */
@@ -292,9 +382,45 @@
ldmia r0!, {r2-r7} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
bx r3
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ movs r0, #4
+ mov r1, lr
+ tst r0, r1
+ beq stack_on_msp
+ stack_on_psp:
+ mrs r0, psp
+ b route_svc
+ stack_on_msp:
+ mrs r0, msp
+ b route_svc
+
+ route_svc:
+ ldr r2, [r0, #24]
+ subs r2, #2
+ ldrb r3, [r2, #0]
+ cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq system_call_enter
+ cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq system_call_enter_1
+ cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq system_call_exit
+ b vPortSVCHandler_C
+
+ system_call_enter:
+ b vSystemCallEnter
+ system_call_enter_1:
+ b vSystemCallEnter_1
+ system_call_exit:
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
movs r0, #4
mov r1, lr
@@ -305,6 +431,8 @@
stacking_used_msp:
mrs r0, msp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
END
diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h
+++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..f051a60
--- /dev/null
+++ b/portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1552 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0}
+ /* This function can also be called from an ISR and therefore needs a check
+ * so that the privileged path is taken when called from an ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/IAR/ARM_CM33/non_secure/port.c b/portable/IAR/ARM_CM33/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/IAR/ARM_CM33/non_secure/port.c
+++ b/portable/IAR/ARM_CM33/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore them when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore them when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/IAR/ARM_CM33/non_secure/portasm.s b/portable/IAR/ARM_CM33/non_secure/portasm.s
index a193cd7..15e74ff 100644
--- a/portable/IAR/ARM_CM33/non_secure/portasm.s
+++ b/portable/IAR/ARM_CM33/non_secure/portasm.s
@@ -32,12 +32,21 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN xSecureContext
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
EXTERN SecureContext_SaveContext
EXTERN SecureContext_LoadContext
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -89,50 +98,81 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+
+ restore_general_regs_first_task:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r3, [r2] /* Read pxCurrentTCB. */
ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r2] /* Program RNR = 4. */
- adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- ldr r5, =xSecureContext
- str r1, [r5] /* Set xSecureContext to this task's value for the same. */
- msr psplim, r2 /* Set this task's PSPLIM value. */
- msr control, r3 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r4 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
ldr r4, =xSecureContext
str r1, [r4] /* Set xSecureContext to this task's value for the same. */
@@ -145,6 +185,7 @@
mov r0, #0
msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
bx r3 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -183,6 +224,143 @@
bx lr /* Return. */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */
+
+ cbz r0, save_ns_context /* No secure context to save. */
+ save_s_context:
+ push {r0-r2, lr}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r2, lr}
+
+ save_ns_context:
+ mov r3, lr /* r3 = LR (EXC_RETURN). */
+ lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ save_general_regs:
+ mrs r3, psp
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r2!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */
+ sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r2!, {r4-r11} /* Store r4-r11. */
+ ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r2!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psp /* r3 = PSP. */
+ mrs r4, psplim /* r4 = PSPLIM. */
+ mrs r5, control /* r5 = CONTROL. */
+ stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+ cbz r0, restore_ns_context /* No secure context to restore. */
+
+ restore_s_context:
+ push {r1-r3, lr}
+ bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r1-r3, lr}
+
+ restore_ns_context:
+ mov r0, lr /* r0 = LR (EXC_RETURN). */
+ lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ restore_general_regs:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
PendSV_Handler:
ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
@@ -200,20 +378,11 @@
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r3] /* Read pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
-#else /* configENABLE_MPU */
subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
mrs r1, psplim /* r1 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
-#endif /* configENABLE_MPU */
b select_next_task
save_ns_context:
@@ -224,17 +393,6 @@
it eq
vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- adds r2, r2, #16 /* r2 = r2 + 16. */
- stm r2, {r4-r11} /* Store the registers that are not saved automatically. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- subs r2, r2, #16 /* r2 = r2 - 16. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
adds r2, r2, #12 /* r2 = r2 + 12. */
@@ -243,7 +401,6 @@
mov r3, lr /* r3 = LR/EXC_RETURN. */
subs r2, r2, #12 /* r2 = r2 - 12. */
stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
select_next_task:
mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
@@ -258,51 +415,6 @@
ldr r1, [r3] /* Read pxCurrentTCB. */
ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
- #if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r3] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r3] /* Program MAIR0. */
- ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r3] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r3] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
-
- #if ( configENABLE_MPU == 1 )
- ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r3 /* Restore the CONTROL register value for the task. */
- mov lr, r4 /* LR = r4. */
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- str r0, [r3] /* Restore the task's xSecureContext. */
- cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- push {r2, r4}
- bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r2, r4}
- mov lr, r4 /* LR = r4. */
- lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- msr psp, r2 /* Remember the new top of stack for the task. */
- bx lr
- #else /* configENABLE_MPU */
ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
msr psplim, r1 /* Restore the PSPLIM register value for the task. */
mov lr, r4 /* LR = r4. */
@@ -319,7 +431,6 @@
bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
msr psp, r2 /* Remember the new top of stack for the task. */
bx lr
- #endif /* configENABLE_MPU */
restore_ns_context:
ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */
@@ -330,14 +441,50 @@
#endif /* configENABLE_FPU || configENABLE_MVE */
msr psp, r2 /* Remember the new top of stack for the task. */
bx lr
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq syscall_enter
+ cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq syscall_enter_1
+ cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_enter_1:
+ mov r1, lr
+ b vSystemCallEnter_1
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
vPortFreeSecureContext:
diff --git a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h
+++ b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..f051a60
--- /dev/null
+++ b/portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1552 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0}
+ /* This function can also be called from an ISR, so we need a check
+ * to take the privileged path when called from an ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c
+++ b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+    /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+    /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+         * double word aligned and therefore, there is no need for padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+    /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+    /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+        /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+         * double word aligned and therefore, there is no need for padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+    /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+    /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s
index 581b84d..ec52025 100644
--- a/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s
+++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s
@@ -32,9 +32,18 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -79,48 +88,79 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs_first_task:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r3, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r3, #4 /* r3 = 4. */
- str r3, [r2] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- msr psplim, r1 /* Set this task's PSPLIM value. */
- msr control, r2 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r3 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
@@ -131,6 +171,7 @@
mov r0, #0
msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
bx r2 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -169,6 +210,114 @@
bx lr /* Return. */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */
+ mrs r2, psp /* r2 = PSP. */
+
+ save_general_regs:
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r1!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */
+ sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r1!, {r4-r11} /* Store r4-r11. */
+ ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r1!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psplim /* r3 = PSPLIM. */
+ mrs r4, control /* r4 = CONTROL. */
+ stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
PendSV_Handler:
mrs r0, psp /* Read PSP in r0. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@@ -176,16 +325,10 @@
it eq
vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
-#if ( configENABLE_MPU == 1 )
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r2, control /* r2 = CONTROL. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
-#else /* configENABLE_MPU */
+
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */
-#endif /* configENABLE_MPU */
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
@@ -203,37 +346,7 @@
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r3, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r3, #4 /* r3 = 4. */
- str r3, [r2] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
-#else /* configENABLE_MPU */
ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
-#endif /* configENABLE_MPU */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
@@ -241,22 +354,53 @@
vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r2 /* Restore the CONTROL register value for the task. */
-#else /* configENABLE_MPU */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
-#endif /* configENABLE_MPU */
msr psp, r0 /* Remember the new top of stack for the task. */
bx r3
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq syscall_enter
+ cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq syscall_enter_1
+ cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_enter_1:
+ mov r1, lr
+ b vSystemCallEnter_1
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
END
diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h
+++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/IAR/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..f051a60
--- /dev/null
+++ b/portable/IAR/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1552 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0}
+ /* This function can also be called from an ISR, in which case we must
+ * take the privileged path. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Each MPU_<API> wrapper below reads the CONTROL register to determine the
+ * caller's privilege level. A privileged caller (nPRIV bit clear) branches
+ * directly to the MPU_<API>Impl implementation. An unprivileged caller
+ * enters via the portSVC_SYSTEM_CALL_ENTER SVC so that the implementation
+ * runs with raised privilege on the privileged system call stack, and
+ * returns via the portSVC_SYSTEM_CALL_EXIT SVC. r0 is preserved across the
+ * CONTROL check with a push/pop pair. */
+
+    PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xStreamBufferReceive_Unpriv
+    MPU_xStreamBufferReceive_Priv:
+        pop {r0}
+        b MPU_xStreamBufferReceiveImpl
+    MPU_xStreamBufferReceive_Unpriv:
+        pop {r0}
+        svc #portSVC_SYSTEM_CALL_ENTER
+        bl MPU_xStreamBufferReceiveImpl
+        svc #portSVC_SYSTEM_CALL_EXIT
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xStreamBufferIsFull_Unpriv
+    MPU_xStreamBufferIsFull_Priv:
+        pop {r0}
+        b MPU_xStreamBufferIsFullImpl
+    MPU_xStreamBufferIsFull_Unpriv:
+        pop {r0}
+        svc #portSVC_SYSTEM_CALL_ENTER
+        bl MPU_xStreamBufferIsFullImpl
+        svc #portSVC_SYSTEM_CALL_EXIT
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xStreamBufferIsEmpty_Unpriv
+    MPU_xStreamBufferIsEmpty_Priv:
+        pop {r0}
+        b MPU_xStreamBufferIsEmptyImpl
+    MPU_xStreamBufferIsEmpty_Unpriv:
+        pop {r0}
+        svc #portSVC_SYSTEM_CALL_ENTER
+        bl MPU_xStreamBufferIsEmptyImpl
+        svc #portSVC_SYSTEM_CALL_EXIT
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xStreamBufferSpacesAvailable_Unpriv
+    MPU_xStreamBufferSpacesAvailable_Priv:
+        pop {r0}
+        b MPU_xStreamBufferSpacesAvailableImpl
+    MPU_xStreamBufferSpacesAvailable_Unpriv:
+        pop {r0}
+        svc #portSVC_SYSTEM_CALL_ENTER
+        bl MPU_xStreamBufferSpacesAvailableImpl
+        svc #portSVC_SYSTEM_CALL_EXIT
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xStreamBufferBytesAvailable_Unpriv
+    MPU_xStreamBufferBytesAvailable_Priv:
+        pop {r0}
+        b MPU_xStreamBufferBytesAvailableImpl
+    MPU_xStreamBufferBytesAvailable_Unpriv:
+        pop {r0}
+        svc #portSVC_SYSTEM_CALL_ENTER
+        bl MPU_xStreamBufferBytesAvailableImpl
+        svc #portSVC_SYSTEM_CALL_EXIT
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+    MPU_xStreamBufferSetTriggerLevel_Priv:
+        pop {r0}
+        b MPU_xStreamBufferSetTriggerLevelImpl
+    MPU_xStreamBufferSetTriggerLevel_Unpriv:
+        pop {r0}
+        svc #portSVC_SYSTEM_CALL_ENTER
+        bl MPU_xStreamBufferSetTriggerLevelImpl
+        svc #portSVC_SYSTEM_CALL_EXIT
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+    MPU_xStreamBufferNextMessageLengthBytes_Priv:
+        pop {r0}
+        b MPU_xStreamBufferNextMessageLengthBytesImpl
+    MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+        pop {r0}
+        svc #portSVC_SYSTEM_CALL_ENTER
+        bl MPU_xStreamBufferNextMessageLengthBytesImpl
+        svc #portSVC_SYSTEM_CALL_EXIT
+        bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. Each weak stub is a
+ * branch-to-self so that an accidental call (i.e. the real
+ * implementation was compiled out) spins in place visibly instead
+ * of falling through into unrelated code. */
+
+    PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+    b MPU_xTaskDelayUntilImpl
+
+    PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+    b MPU_xTaskAbortDelayImpl
+
+    PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+    b MPU_vTaskDelayImpl
+
+    PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+    b MPU_uxTaskPriorityGetImpl
+
+    PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+    b MPU_eTaskGetStateImpl
+
+    PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+    b MPU_vTaskGetInfoImpl
+
+    PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+    b MPU_xTaskGetIdleTaskHandleImpl
+
+    PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+    b MPU_vTaskSuspendImpl
+
+    PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+    b MPU_vTaskResumeImpl
+
+    PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+    b MPU_xTaskGetTickCountImpl
+
+    PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+    b MPU_uxTaskGetNumberOfTasksImpl
+
+    PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+    b MPU_pcTaskGetNameImpl
+
+    PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+    b MPU_ulTaskGetRunTimeCounterImpl
+
+    PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+    b MPU_ulTaskGetRunTimePercentImpl
+
+    PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+    b MPU_ulTaskGetIdleRunTimePercentImpl
+
+    PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+    b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+    PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+    b MPU_vTaskSetApplicationTaskTagImpl
+
+    PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+    b MPU_xTaskGetApplicationTaskTagImpl
+
+    PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+    b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+    PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+    b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+    PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+    b MPU_uxTaskGetSystemStateImpl
+
+    PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+    b MPU_uxTaskGetStackHighWaterMarkImpl
+
+    PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+    b MPU_uxTaskGetStackHighWaterMark2Impl
+
+    PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+    b MPU_xTaskGetCurrentTaskHandleImpl
+
+    PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+    b MPU_xTaskGetSchedulerStateImpl
+
+    PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+    b MPU_vTaskSetTimeOutStateImpl
+
+    PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+    b MPU_xTaskCheckForTimeOutImpl
+
+    PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+    b MPU_xTaskGenericNotifyImpl
+
+    PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+    b MPU_xTaskGenericNotifyWaitImpl
+
+    PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+    b MPU_ulTaskGenericNotifyTakeImpl
+
+    PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+    b MPU_xTaskGenericNotifyStateClearImpl
+
+    PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+    b MPU_ulTaskGenericNotifyValueClearImpl
+
+    PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+    b MPU_xQueueGenericSendImpl
+
+    PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+    b MPU_uxQueueMessagesWaitingImpl
+
+    PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+    b MPU_uxQueueSpacesAvailableImpl
+
+    PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+    b MPU_xQueueReceiveImpl
+
+    PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+    b MPU_xQueuePeekImpl
+
+    PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+    b MPU_xQueueSemaphoreTakeImpl
+
+    PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+    b MPU_xQueueGetMutexHolderImpl
+
+    PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+    b MPU_xQueueTakeMutexRecursiveImpl
+
+    PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+    b MPU_xQueueGiveMutexRecursiveImpl
+
+    PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+    b MPU_xQueueSelectFromSetImpl
+
+    PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+    b MPU_xQueueAddToSetImpl
+
+    PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+    b MPU_vQueueAddToRegistryImpl
+
+    PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+    b MPU_vQueueUnregisterQueueImpl
+
+    PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+    b MPU_pcQueueGetNameImpl
+
+    PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+    b MPU_pvTimerGetTimerIDImpl
+
+    PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+    b MPU_vTimerSetTimerIDImpl
+
+    PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+    b MPU_xTimerIsTimerActiveImpl
+
+    PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+    b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+    PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+    b MPU_xTimerGenericCommandImpl
+
+    PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+    b MPU_pcTimerGetNameImpl
+
+    PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+    b MPU_vTimerSetReloadModeImpl
+
+    PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+    b MPU_xTimerGetReloadModeImpl
+
+    PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+    b MPU_uxTimerGetReloadModeImpl
+
+    PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+    b MPU_xTimerGetPeriodImpl
+
+    PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+    b MPU_xTimerGetExpiryTimeImpl
+
+    PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+    b MPU_xEventGroupWaitBitsImpl
+
+    PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+    b MPU_xEventGroupClearBitsImpl
+
+    PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+    b MPU_xEventGroupSetBitsImpl
+
+    PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+    b MPU_xEventGroupSyncImpl
+
+    PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+    b MPU_uxEventGroupGetNumberImpl
+
+    PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+    b MPU_vEventGroupSetNumberImpl
+
+    PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+    b MPU_xStreamBufferSendImpl
+
+    PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+    b MPU_xStreamBufferReceiveImpl
+
+    PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+    b MPU_xStreamBufferIsFullImpl
+
+    PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+    b MPU_xStreamBufferIsEmptyImpl
+
+    PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+    b MPU_xStreamBufferSpacesAvailableImpl
+
+    PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+    b MPU_xStreamBufferBytesAvailableImpl
+
+    PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+    b MPU_xStreamBufferSetTriggerLevelImpl
+
+    PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+    b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/IAR/ARM_CM35P/non_secure/port.c b/portable/IAR/ARM_CM35P/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/IAR/ARM_CM35P/non_secure/port.c
+++ b/portable/IAR/ARM_CM35P/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? Both bounds inclusive. */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+    ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions?
+ * True when every bit set in accessRequest is also set in permissions. */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+    ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+    static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+    {
+        uint32_t ulAccessPermissions = 0;
+
+        /* Map the RBAR access-permission field to task-level MPU permission
+         * flags. The two encodings are mutually exclusive, so test with else-if.
+         * Any other encoding yields no permissions (0). */
+        if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+        {
+            ulAccessPermissions = tskMPU_READ_PERMISSION;
+        }
+        else if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+        {
+            ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+        }
+
+        return ulAccessPermissions;
+    }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* Move the calling task from its own stack to the privileged system call
+ * stack and raise privilege for the duration of the system call. Reached
+ * from the SVC handler for portSVC_SYSTEM_CALL_ENTER; used for system
+ * calls whose parameters all fit in registers (up to 4). */
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+    extern TaskHandle_t pxCurrentTCB;
+    xMPU_SETTINGS * pxMpuSettings;
+    uint32_t * pulSystemCallStack;
+    uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+    #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+         * exported from linker scripts. */
+        extern uint32_t * __syscalls_flash_start__;
+        extern uint32_t * __syscalls_flash_end__;
+    #else
+        /* Declaration when these variables are exported from linker scripts. */
+        extern uint32_t __syscalls_flash_start__[];
+        extern uint32_t __syscalls_flash_end__[];
+    #endif /* #if defined( __ARMCC_VERSION ) */
+
+    ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+    /* If the request did not come from the system call section, do nothing. */
+    if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+        ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+    {
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+        pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+        /* This is not NULL only for the duration of the system call. */
+        configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+        #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+        {
+            if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+            {
+                /* Extended frame i.e. FPU in use. */
+                ulStackFrameSize = 26;
+                __asm volatile (
+                    " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+                    ::: "memory"
+                );
+            }
+            else
+            {
+                /* Standard frame i.e. FPU not in use. */
+                ulStackFrameSize = 8;
+            }
+        }
+        #else
+        {
+            ulStackFrameSize = 8;
+        }
+        #endif /* configENABLE_FPU || configENABLE_MVE */
+
+        /* Make space on the system call stack for the stack frame. */
+        pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+        /* Copy the stack frame. */
+        for( i = 0; i < ulStackFrameSize; i++ )
+        {
+            pulSystemCallStack[ i ] = pulTaskStack[ i ];
+        }
+
+        /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+         * restore it when we exit from the system call. */
+        pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+        __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+        /* Use the pulSystemCallStack in thread mode. */
+        __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+        __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+        /* Remember the location where we should copy the stack frame when we exit from
+         * the system call. */
+        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+        /* Record if the hardware used padding to force the stack pointer
+         * to be double word aligned. */
+        if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+        {
+            pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+        }
+        else
+        {
+            pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+        }
+
+        /* We ensure in pxPortInitialiseStack that the system call stack is
+         * double word aligned and therefore, there is no need of padding.
+         * Clear the bit[9] of stacked xPSR. */
+        pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+        /* Raise the privilege for the duration of the system call. */
+        __asm volatile (
+            " mrs r0, control \n" /* Obtain current control value. */
+            " movs r1, #1 \n" /* r1 = 1. */
+            " bics r0, r1 \n" /* Clear nPRIV bit. */
+            " msr control, r0 \n" /* Write back new control value. */
+            ::: "r0", "r1", "memory"
+        );
+    }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* Same as vSystemCallEnter but for system calls with 5 parameters: the
+ * fifth parameter is passed on the task stack and must be copied to the
+ * system call stack along with the exception stack frame. */
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+    extern TaskHandle_t pxCurrentTCB;
+    xMPU_SETTINGS * pxMpuSettings;
+    uint32_t * pulSystemCallStack;
+    uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+    #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+         * exported from linker scripts. */
+        extern uint32_t * __syscalls_flash_start__;
+        extern uint32_t * __syscalls_flash_end__;
+    #else
+        /* Declaration when these variables are exported from linker scripts. */
+        extern uint32_t __syscalls_flash_start__[];
+        extern uint32_t __syscalls_flash_end__[];
+    #endif /* #if defined( __ARMCC_VERSION ) */
+
+    ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+    /* If the request did not come from the system call section, do nothing. */
+    if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+        ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+    {
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+        pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+        /* This is not NULL only for the duration of the system call. */
+        configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+        #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+        {
+            if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+            {
+                /* Extended frame i.e. FPU in use. */
+                ulStackFrameSize = 26;
+                __asm volatile (
+                    " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+                    ::: "memory"
+                );
+            }
+            else
+            {
+                /* Standard frame i.e. FPU not in use. */
+                ulStackFrameSize = 8;
+            }
+        }
+        #else
+        {
+            ulStackFrameSize = 8;
+        }
+        #endif /* configENABLE_FPU || configENABLE_MVE */
+
+        /* Make space on the system call stack for the stack frame and
+         * the parameter passed on the stack. We only need to copy one
+         * parameter but we still reserve 2 spaces to keep the stack
+         * double word aligned. */
+        pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+        /* Copy the stack frame. */
+        for( i = 0; i < ulStackFrameSize; i++ )
+        {
+            pulSystemCallStack[ i ] = pulTaskStack[ i ];
+        }
+
+        /* Copy the parameter which is passed on the stack. When the hardware
+         * inserted an alignment padding word, the parameter is one word
+         * further up the task stack. */
+        if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+        {
+            pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+        }
+        else
+        {
+            pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+            /* Record if the hardware used padding to force the stack pointer
+             * to be double word aligned. */
+            pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+        }
+
+        /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+         * We need to restore it when we exit from the system call. */
+        pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+        __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+        /* Use the pulSystemCallStack in thread mode. */
+        __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+        __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+        /* Remember the location where we should copy the stack frame when we exit from
+         * the system call. */
+        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+        /* We ensure in pxPortInitialiseStack that the system call stack is
+         * double word aligned and therefore, there is no need of padding.
+         * Clear the bit[9] of stacked xPSR. */
+        pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+        /* Raise the privilege for the duration of the system call. */
+        __asm volatile (
+            " mrs r0, control \n" /* Obtain current control value. */
+            " movs r1, #1 \n" /* r1 = 1. */
+            " bics r0, r1 \n" /* Clear nPRIV bit. */
+            " msr control, r0 \n" /* Write back new control value. */
+            ::: "r0", "r1", "memory"
+        );
+    }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* Counterpart of vSystemCallEnter/vSystemCallEnter_1: copy the exception
+ * stack frame back from the privileged system call stack to the task stack,
+ * restore the task's LR/PSPLIM and xPSR alignment-padding bit, and drop
+ * privilege before returning to thread mode. Reached from the SVC handler
+ * for portSVC_SYSTEM_CALL_EXIT. */
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+    extern TaskHandle_t pxCurrentTCB;
+    xMPU_SETTINGS * pxMpuSettings;
+    uint32_t * pulTaskStack;
+    uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+    #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+         * exported from linker scripts. */
+        extern uint32_t * __syscalls_flash_start__;
+        extern uint32_t * __syscalls_flash_end__;
+    #else
+        /* Declaration when these variables are exported from linker scripts. */
+        extern uint32_t __syscalls_flash_start__[];
+        extern uint32_t __syscalls_flash_end__[];
+    #endif /* #if defined( __ARMCC_VERSION ) */
+
+    ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+    /* If the request did not come from the system call section, do nothing. */
+    if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+        ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+    {
+        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+        pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+        #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+        {
+            if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+            {
+                /* Extended frame i.e. FPU in use. */
+                ulStackFrameSize = 26;
+                __asm volatile (
+                    " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+                    ::: "memory"
+                );
+            }
+            else
+            {
+                /* Standard frame i.e. FPU not in use. */
+                ulStackFrameSize = 8;
+            }
+        }
+        #else
+        {
+            ulStackFrameSize = 8;
+        }
+        #endif /* configENABLE_FPU || configENABLE_MVE */
+
+        /* Make space on the task stack for the stack frame. */
+        pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+        /* Copy the stack frame. */
+        for( i = 0; i < ulStackFrameSize; i++ )
+        {
+            pulTaskStack[ i ] = pulSystemCallStack[ i ];
+        }
+
+        /* Use the pulTaskStack in thread mode. */
+        __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+        /* Restore the LR and PSPLIM to what they were at the time of
+         * system call entry. */
+        pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+        __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+        /* If the hardware used padding to force the stack pointer
+         * to be double word aligned, set the stacked xPSR bit[9],
+         * otherwise clear it. */
+        if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+        {
+            pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+        }
+        else
+        {
+            pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+        }
+
+        /* This is not NULL only for the duration of the system call. */
+        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+        /* Drop the privilege before returning to the thread mode. */
+        __asm volatile (
+            " mrs r0, control \n" /* Obtain current control value. */
+            " movs r1, #1 \n" /* r1 = 1. */
+            " orrs r0, r1 \n" /* Set nPRIV bit. */
+            " msr control, r0 \n" /* Write back new control value. */
+            ::: "r0", "r1", "memory"
+        );
+    }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+/* Return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ * Privilege is read from the task's MPU settings flags (recorded in
+ * pxPortInitialiseStack via portTASK_IS_PRIVILEGED_FLAG), not from the
+ * CONTROL register. */
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+    BaseType_t xTaskIsPrivileged = pdFALSE;
+    const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+    if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+    {
+        xTaskIsPrivileged = pdTRUE;
+    }
+
+    return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+/* Initialise a new task's saved context. With the v2 MPU wrappers the
+ * context lives in the TCB (xMPUSettings->ulContext) rather than on the
+ * task stack. Layout, in write order: r4-r11, then the hardware-frame
+ * values (r0-r3, r12, LR, PC, xPSR), optionally xSecureContext, then PSP,
+ * PSPLIM, CONTROL and EXC_RETURN. The returned pointer is one past the
+ * last entry written. */
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+                                     StackType_t * pxEndOfStack,
+                                     TaskFunction_t pxCode,
+                                     void * pvParameters,
+                                     BaseType_t xRunPrivileged,
+                                     xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+    uint32_t ulIndex = 0;
+
+    /* Callee-saved registers, pre-loaded with recognisable debug values. */
+    xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+    ulIndex++;
+
+    /* Values the hardware would normally stack on exception entry. */
+    xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+    ulIndex++;
+
+    #if ( configENABLE_TRUSTZONE == 1 )
+    {
+        xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+        ulIndex++;
+    }
+    #endif /* configENABLE_TRUSTZONE */
+    xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+    ulIndex++;
+    xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+    ulIndex++;
+    /* Record the task's privilege level both in the task flags (used by
+     * xPortIsTaskPrivileged) and in the saved CONTROL value. */
+    if( xRunPrivileged == pdTRUE )
+    {
+        xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+        ulIndex++;
+    }
+    else
+    {
+        xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+        xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+        ulIndex++;
+    }
+    xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+    ulIndex++;
+
+    #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+    {
+        /* Ensure that the system call stack is double word aligned. */
+        xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+        xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+                                                                                 ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+        xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+        xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+                                                                                        ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+                                                                                      ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+        /* This is not NULL only for the duration of a system call. */
+        xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+    }
+    #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+    return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/IAR/ARM_CM35P/non_secure/portasm.s b/portable/IAR/ARM_CM35P/non_secure/portasm.s
index a193cd7..15e74ff 100644
--- a/portable/IAR/ARM_CM35P/non_secure/portasm.s
+++ b/portable/IAR/ARM_CM35P/non_secure/portasm.s
@@ -32,12 +32,21 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN xSecureContext
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
EXTERN SecureContext_SaveContext
EXTERN SecureContext_LoadContext
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -89,50 +98,81 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+
+ restore_general_regs_first_task:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r3, [r2] /* Read pxCurrentTCB. */
ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r2] /* Program RNR = 4. */
- adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- ldr r5, =xSecureContext
- str r1, [r5] /* Set xSecureContext to this task's value for the same. */
- msr psplim, r2 /* Set this task's PSPLIM value. */
- msr control, r3 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r4 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
ldr r4, =xSecureContext
str r1, [r4] /* Set xSecureContext to this task's value for the same. */
@@ -145,6 +185,7 @@
mov r0, #0
msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
bx r3 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -183,6 +224,143 @@
bx lr /* Return. */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */
+
+ cbz r0, save_ns_context /* No secure context to save. */
+ save_s_context:
+ push {r0-r2, lr}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r2, lr}
+
+ save_ns_context:
+ mov r3, lr /* r3 = LR (EXC_RETURN). */
+ lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ save_general_regs:
+ mrs r3, psp
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r2!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */
+ sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r2!, {r4-r11} /* Store r4-r11. */
+ ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r2!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psp /* r3 = PSP. */
+ mrs r4, psplim /* r4 = PSPLIM. */
+ mrs r5, control /* r5 = CONTROL. */
+ stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+ cbz r0, restore_ns_context /* No secure context to restore. */
+
+ restore_s_context:
+ push {r1-r3, lr}
+ bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r1-r3, lr}
+
+ restore_ns_context:
+ mov r0, lr /* r0 = LR (EXC_RETURN). */
+ lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ restore_general_regs:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
PendSV_Handler:
ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
@@ -200,20 +378,11 @@
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r3] /* Read pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
-#else /* configENABLE_MPU */
subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
mrs r1, psplim /* r1 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
-#endif /* configENABLE_MPU */
b select_next_task
save_ns_context:
@@ -224,17 +393,6 @@
it eq
vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- adds r2, r2, #16 /* r2 = r2 + 16. */
- stm r2, {r4-r11} /* Store the registers that are not saved automatically. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- subs r2, r2, #16 /* r2 = r2 - 16. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
adds r2, r2, #12 /* r2 = r2 + 12. */
@@ -243,7 +401,6 @@
mov r3, lr /* r3 = LR/EXC_RETURN. */
subs r2, r2, #12 /* r2 = r2 - 12. */
stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
select_next_task:
mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
@@ -258,51 +415,6 @@
ldr r1, [r3] /* Read pxCurrentTCB. */
ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
- #if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r3] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r3] /* Program MAIR0. */
- ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r3] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r3] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
-
- #if ( configENABLE_MPU == 1 )
- ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r3 /* Restore the CONTROL register value for the task. */
- mov lr, r4 /* LR = r4. */
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- str r0, [r3] /* Restore the task's xSecureContext. */
- cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- push {r2, r4}
- bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r2, r4}
- mov lr, r4 /* LR = r4. */
- lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- msr psp, r2 /* Remember the new top of stack for the task. */
- bx lr
- #else /* configENABLE_MPU */
ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
msr psplim, r1 /* Restore the PSPLIM register value for the task. */
mov lr, r4 /* LR = r4. */
@@ -319,7 +431,6 @@
bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
msr psp, r2 /* Remember the new top of stack for the task. */
bx lr
- #endif /* configENABLE_MPU */
restore_ns_context:
ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */
@@ -330,14 +441,50 @@
#endif /* configENABLE_FPU || configENABLE_MVE */
msr psp, r2 /* Remember the new top of stack for the task. */
bx lr
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq syscall_enter
+ cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq syscall_enter_1
+ cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_enter_1:
+ mov r1, lr
+ b vSystemCallEnter_1
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
vPortFreeSecureContext:
diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h
+++ b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..f051a60
--- /dev/null
+++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1552 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0}
+ /* This function can also be called from an ISR and therefore, we need a
+ * check to take the privileged path when called from an ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper entry for xStreamBufferBytesAvailable. A privileged
+ * caller branches straight to the implementation; an unprivileged
+ * caller enters via SVC so the call runs with privilege on the task's
+ * separate system call stack. */
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+ mrs r0, control
+ tst r0, #1 /* CONTROL bit[0] (nPRIV) is set when running unprivileged. */
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0} /* Restore the argument. */
+ b MPU_xStreamBufferBytesAvailableImpl /* Tail-call; Impl returns directly to the caller. */
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0} /* Restore the argument. */
+ svc #portSVC_SYSTEM_CALL_ENTER /* Switch to the privileged system call stack and raise privilege. */
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT /* Back to the task stack; privilege dropped. */
+ bx lr /* Return to the caller; stacked LR was restored by vSystemCallExit. */
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper entry for xStreamBufferSetTriggerLevel. A privileged
+ * caller branches straight to the implementation; an unprivileged
+ * caller enters via SVC so the call runs with privilege on the task's
+ * separate system call stack. */
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+ mrs r0, control
+ tst r0, #1 /* CONTROL bit[0] (nPRIV) is set when running unprivileged. */
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0} /* Restore the argument. */
+ b MPU_xStreamBufferSetTriggerLevelImpl /* Tail-call; Impl returns directly to the caller. */
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0} /* Restore the argument. */
+ svc #portSVC_SYSTEM_CALL_ENTER /* Switch to the privileged system call stack and raise privilege. */
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT /* Back to the task stack; privilege dropped. */
+ bx lr /* Return to the caller; stacked LR was restored by vSystemCallExit. */
+/*-----------------------------------------------------------*/
+
+/* MPU wrapper entry for xStreamBufferNextMessageLengthBytes. A
+ * privileged caller branches straight to the implementation; an
+ * unprivileged caller enters via SVC so the call runs with privilege
+ * on the task's separate system call stack. */
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+ mrs r0, control
+ tst r0, #1 /* CONTROL bit[0] (nPRIV) is set when running unprivileged. */
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0} /* Restore the argument. */
+ b MPU_xStreamBufferNextMessageLengthBytesImpl /* Tail-call; Impl returns directly to the caller. */
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0} /* Restore the argument. */
+ svc #portSVC_SYSTEM_CALL_ENTER /* Switch to the privileged system call stack and raise privilege. */
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT /* Back to the task stack; privilege dropped. */
+ bx lr /* Return to the caller; stacked LR was restored by vSystemCallExit. */
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+/* Each stub branches to itself: if a wrapper whose real implementation
+ * was configured out is ever called, execution spins in place —
+ * presumably so the fault is easy to spot with a debugger rather than
+ * continuing with undefined behaviour. The strong definitions in
+ * mpu_wrappers override these at link time. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c
+++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? Both
+ * macro parameters are fully parenthesised so that a compound argument
+ * (e.g. ORed permission bits) compares correctly - without the
+ * parentheses, "x == a | b" would parse as "( x == a ) | b". */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* Handles portSVC_SYSTEM_CALL_ENTER for system calls with up to four
+ * register parameters: copies the exception stack frame from the task
+ * stack to the task's privileged system call stack, switches
+ * PSP/PSPLIM to it, and raises privilege. vSystemCallExit() reverses
+ * all of this when the system call completes. */
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* Same as vSystemCallEnter() but for system calls with five parameters:
+ * the fifth parameter is passed on the task stack, so one extra word
+ * (rounded up to two to keep double word alignment) is copied across
+ * to the system call stack after the exception frame. */
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. If the hardware
+ * inserted a padding word, the parameter sits one word further up. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+/* Handles portSVC_SYSTEM_CALL_EXIT: copies the exception stack frame
+ * back from the system call stack to the task stack, restores
+ * PSP/PSPLIM and the caller's LR saved by vSystemCallEnter()/
+ * vSystemCallEnter_1(), re-applies the recorded xPSR padding bit, and
+ * drops privilege before returning to thread mode. */
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+/* Returns pdTRUE when the calling task was created as a privileged
+ * task (portTASK_IS_PRIVILEGED_FLAG set in its MPU settings by
+ * pxPortInitialiseStack), pdFALSE otherwise. */
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+/* Builds the initial task context in the TCB-resident ulContext array
+ * (callee-saved r4-r11, the hardware-stacked frame r0-r3/r12/LR/PC/xPSR,
+ * optional secure context, PSP, PSPLIM, CONTROL, EXC_RETURN) and
+ * initialises the per-task system call stack pointers. Returns a
+ * pointer just past the last populated context word. */
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ /* Callee-saved registers, pre-loaded with recognisable debug values. */
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ /* Registers the hardware saves/restores on exception entry/exit. */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack (8 words: r0-r3, r12, LR, PC, xPSR). */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned.
+ * pulSystemCallStack (initial top) is rounded down and
+ * pulSystemCallStackLimit (lowest address) rounded up, so both
+ * stay inside ulSystemCallStackBuffer. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s
index 581b84d..ec52025 100644
--- a/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s
+++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s
@@ -32,9 +32,18 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -79,48 +88,79 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs_first_task:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r3, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r3, #4 /* r3 = 4. */
- str r3, [r2] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- msr psplim, r1 /* Set this task's PSPLIM value. */
- msr control, r2 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r3 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
@@ -131,6 +171,7 @@
mov r0, #0
msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
bx r2 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -169,6 +210,114 @@
bx lr /* Return. */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */
+ mrs r2, psp /* r2 = PSP. */
+
+ save_general_regs:
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r1!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */
+ sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r1!, {r4-r11} /* Store r4-r11. */
+ ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r1!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psplim /* r3 = PSPLIM. */
+ mrs r4, control /* r4 = CONTROL. */
+ stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
PendSV_Handler:
mrs r0, psp /* Read PSP in r0. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@@ -176,16 +325,10 @@
it eq
vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
-#if ( configENABLE_MPU == 1 )
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r2, control /* r2 = CONTROL. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
-#else /* configENABLE_MPU */
+
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */
-#endif /* configENABLE_MPU */
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
@@ -203,37 +346,7 @@
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r3, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r3, #4 /* r3 = 4. */
- str r3, [r2] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
-#else /* configENABLE_MPU */
ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
-#endif /* configENABLE_MPU */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
@@ -241,22 +354,53 @@
vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r2 /* Restore the CONTROL register value for the task. */
-#else /* configENABLE_MPU */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
-#endif /* configENABLE_MPU */
msr psp, r0 /* Remember the new top of stack for the task. */
bx r3
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq syscall_enter
+ cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq syscall_enter_1
+ cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_enter_1:
+ mov r1, lr
+ b vSystemCallEnter_1
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
END
diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h
+++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..a0541f7
--- /dev/null
+++ b/portable/IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1556 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Including FreeRTOSConfig.h here will cause build errors if the header file
+ * contains code not understood by the assembler - for example the 'extern' keyword.
+ * To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so
+ * the code is included in C files but excluded by the preprocessor in assembly
+ * files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
+#include "FreeRTOSConfig.h"
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER       3   /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 5
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0}
+ /* This function can be called from ISR also and therefore, we need a check
+ * to take privileged path, if called from ISR. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c
index 2d70b33..27f6f0d 100755
--- a/portable/IAR/ARM_CM4F_MPU/port.c
+++ b/portable/IAR/ARM_CM4F_MPU/port.c
@@ -132,8 +132,14 @@
#define portINITIAL_CONTROL_IF_UNPRIVILEGED ( 0x03 )
#define portINITIAL_CONTROL_IF_PRIVILEGED ( 0x02 )
+/* Constants used during system call enter and exit. */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+
/* Offsets in the stack to the parameters when inside the SVC handler. */
+#define portOFFSET_TO_LR ( 5 )
#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
/* The systick is a 24-bit counter. */
#define portMAX_24_BIT_NUMBER ( 0xffffffUL )
@@ -147,6 +153,21 @@
* have bit-0 clear, as it is loaded into the PC on exit from an ISR. */
#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL )
+/* Does addr lie within [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
/*
* Configure a number of standard MPU regions that are used by all tasks.
*/
@@ -184,7 +205,7 @@
/*
* The C portion of the SVC handler.
*/
-void vPortSVCHandler_C( uint32_t * pulParam );
+void vPortSVCHandler_C( uint32_t * pulParam ) PRIVILEGED_FUNCTION;
/*
* Called from the SVC handler used to start the scheduler.
@@ -208,6 +229,57 @@
#else
void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
#endif
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
/*-----------------------------------------------------------*/
/* Each task maintains its own interrupt status in the critical nesting
@@ -233,46 +305,56 @@
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged )
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings )
{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
-
- /* Offset added to account for the way the MCU uses the stack on entry/exit
- * of interrupts, and to ensure alignment. */
- pxTopOfStack--;
-
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0; /* LR */
-
- /* Save code space by skipping register initialisation. */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
-
- /* A save method is being used that requires each task to maintain its
- * own exec return value. */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */
-
if( xRunPrivileged == pdTRUE )
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED;
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED;
}
else
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED;
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED;
}
+ xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */
+ xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */
+ xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */
+ xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */
+ xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */
+ xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */
+ xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */
+ xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */
+ xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
- return pxTopOfStack;
+ xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */
+ xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */
+ xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */
+ xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */
+ xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */
+ xMPUSettings->ulContext[ 16 ] = 0; /* LR. */
+ xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */
+ xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ return &( xMPUSettings->ulContext[ 19 ] );
}
/*-----------------------------------------------------------*/
-void vPortSVCHandler_C( uint32_t * pulParam )
+void vPortSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
{
uint8_t ucSVCNumber;
uint32_t ulPC;
@@ -334,7 +416,7 @@
::: "r1", "memory"
);
break;
- #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
default: /* Unknown SVC call. */
break;
@@ -342,6 +424,308 @@
}
/*-----------------------------------------------------------*/
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Clear nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Store the value of the Link Register before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " bic r1, #1 \n" /* Clear nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Store the value of the Link Register before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the affect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r1, control \n" /* Obtain current control value. */
+ " orr r1, #1 \n" /* Set nPRIV bit. */
+ " msr control, r1 \n" /* Write back new control value. */
+ ::: "r1", "memory"
+ );
+
+ /* Restore the stacked link register to what it was at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+/*-----------------------------------------------------------*/
+
/*
* See header file for description.
*/
@@ -738,11 +1122,19 @@
( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) |
( portMPU_REGION_ENABLE );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
+
/* Invalidate user configurable regions. */
for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ )
{
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
}
else
@@ -765,6 +1157,13 @@
( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) |
( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) +
+ ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
+
}
lIndex = 0;
@@ -785,12 +1184,28 @@
( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) |
( xRegions[ lIndex ].ulParameters ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL );
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
+ if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) ||
+ ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION;
+ }
+ if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
}
else
{
/* Invalidate the region. */
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
lIndex++;
@@ -799,6 +1214,48 @@
}
/*-----------------------------------------------------------*/
+BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+{
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+}
+/*-----------------------------------------------------------*/
+
+
#if ( configASSERT_DEFINED == 1 )
void vPortValidateInterruptPriority( void )
diff --git a/portable/IAR/ARM_CM4F_MPU/portasm.s b/portable/IAR/ARM_CM4F_MPU/portasm.s
index db751f6..3cbe5e0 100644
--- a/portable/IAR/ARM_CM4F_MPU/portasm.s
+++ b/portable/IAR/ARM_CM4F_MPU/portasm.s
@@ -25,6 +25,7 @@
* https://github.com/FreeRTOS
*
*/
+
/* Including FreeRTOSConfig.h here will cause build errors if the header file
contains code not understood by the assembler - for example the 'extern' keyword.
To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so
@@ -38,6 +39,9 @@
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
PUBLIC xPortPendSVHandler
PUBLIC vPortSVCHandler
@@ -49,99 +53,141 @@
/*-----------------------------------------------------------*/
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 3
+#define portSVC_SYSTEM_CALL_ENTER_1 4
+#define portSVC_SYSTEM_CALL_EXIT 5
+/*-----------------------------------------------------------*/
+
xPortPendSVHandler:
+
+ ldr r3, =pxCurrentTCB
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ ldr r1, [r2] /* r1 = Location where the context should be saved. */
+
+ /*------------ Save Context. ----------- */
+ mrs r3, control
mrs r0, psp
isb
- /* Get the location of the current TCB. */
- ldr r3, =pxCurrentTCB
- ldr r2, [r3]
- /* Is the task using the FPU context? If so, push high vfp registers. */
- tst r14, #0x10
- it eq
- vstmdbeq r0!, {s16-s31}
+ add r0, r0, #0x20 /* Move r0 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r1!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r0, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */
+ sub r0, r0, #0x20 /* Set r0 back to the location of hardware saved context. */
- /* Save the core registers. */
- mrs r1, control
- stmdb r0!, {r1, r4-r11, r14}
+ stmia r1!, {r3-r11, lr} /* Store CONTROL register, r4-r11 and LR. */
+ ldmia r0, {r4-r11} /* Copy hardware saved context into r4-r11. */
+ stmia r1!, {r0, r4-r11} /* Store original PSP (after hardware has saved context) and the hardware saved context. */
+ str r1, [r2] /* Save the location from where the context should be restored as the first member of TCB. */
- /* Save the new top of stack into the first member of the TCB. */
- str r0, [r2]
-
- stmdb sp!, {r0, r3}
+ /*---------- Select next task. --------- */
mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
- #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- #endif
+#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
+ cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+#endif
msr basepri, r0
dsb
isb
- #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- #endif
+#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
+ cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+#endif
bl vTaskSwitchContext
mov r0, #0
msr basepri, r0
- ldmia sp!, {r0, r3}
- /* The first item in pxCurrentTCB is the task top of stack. */
- ldr r1, [r3]
- ldr r0, [r1]
- /* Move onto the second item in the TCB... */
- add r1, r1, #4
+ /*------------ Program MPU. ------------ */
+ ldr r3, =pxCurrentTCB
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- bic r3, r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- str r3, [r2] /* Disable MPU. */
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ bic r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ str r3, [r0] /* Disable MPU. */
- /* Region Base Address register. */
- ldr r2, =0xe000ed9c
- /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
- ldmia r1!, {r4-r11}
- /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- stmia r2, {r4-r11}
+ ldr r0, =0xe000ed9c /* Region Base Address register. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- #ifdef configTOTAL_MPU_REGIONS
- #if ( configTOTAL_MPU_REGIONS == 16 )
- /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
- ldmia r1!, {r4-r11}
- /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
- stmia r2, {r4-r11}
- /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
- ldmia r1!, {r4-r11}
- /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
- stmia r2, {r4-r11}
- #endif /* configTOTAL_MPU_REGIONS == 16. */
- #endif /* configTOTAL_MPU_REGIONS */
+#ifdef configTOTAL_MPU_REGIONS
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+ #endif /* configTOTAL_MPU_REGIONS == 16. */
+#endif
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- str r3, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ orr r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ str r3, [r0] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
- /* Pop the registers that are not automatically saved on exception entry. */
- ldmia r0!, {r3-r11, r14}
+ /*---------- Restore Context. ---------- */
+ ldr r3, =pxCurrentTCB
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ ldr r1, [r2] /* r1 = Location of saved context in TCB. */
+
+ ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ msr psp, r0
+ stmia r0!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */
msr control, r3
- /* Is the task using the FPU context? If so, pop the high vfp registers
- too. */
- tst r14, #0x10
- it eq
- vldmiaeq r0!, {s16-s31}
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r0!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */
- msr psp, r0
- isb
-
- bx r14
-
+ str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
/*-----------------------------------------------------------*/
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
vPortSVCHandler:
- #ifndef USE_PROCESS_STACK /* Code should not be required if a main() is using the process stack. */
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #portSVC_SYSTEM_CALL_ENTER
+ beq syscall_enter
+ cmp r2, #portSVC_SYSTEM_CALL_ENTER_1
+ beq syscall_enter_1
+ cmp r2, #portSVC_SYSTEM_CALL_EXIT
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_enter_1:
+ mov r1, lr
+ b vSystemCallEnter_1
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+vPortSVCHandler:
+ #ifndef USE_PROCESS_STACK
tst lr, #4
ite eq
mrseq r0, msp
@@ -151,6 +197,7 @@
#endif
b vPortSVCHandler_C
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
vPortStartFirstTask:
@@ -176,60 +223,56 @@
/*-----------------------------------------------------------*/
vPortRestoreContextOfFirstTask:
- /* Use the NVIC offset register to locate the stack. */
- ldr r0, =0xE000ED08
+ ldr r0, =0xE000ED08 /* Use the NVIC offset register to locate the stack. */
ldr r0, [r0]
ldr r0, [r0]
- /* Set the msp back to the start of the stack. */
- msr msp, r0
- /* Restore the context. */
+ msr msp, r0 /* Set the msp back to the start of the stack. */
+
+ /*------------ Program MPU. ------------ */
ldr r3, =pxCurrentTCB
- ldr r1, [r3]
- /* The first item in the TCB is the task top of stack. */
- ldr r0, [r1]
- /* Move onto the second item in the TCB... */
- add r1, r1, #4
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- bic r3, r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- str r3, [r2] /* Disable MPU. */
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ bic r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ str r3, [r0] /* Disable MPU. */
- /* Region Base Address register. */
- ldr r2, =0xe000ed9c
- /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
- ldmia r1!, {r4-r11}
- /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- stmia r2, {r4-r11}
+ ldr r0, =0xe000ed9c /* Region Base Address register. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- #ifdef configTOTAL_MPU_REGIONS
- #if ( configTOTAL_MPU_REGIONS == 16 )
- /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
- ldmia r1!, {r4-r11}
- /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
- stmia r2, {r4-r11}
- /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
- ldmia r1!, {r4-r11}
- /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
- stmia r2, {r4-r11}
- #endif /* configTOTAL_MPU_REGIONS == 16. */
- #endif /* configTOTAL_MPU_REGIONS */
+#ifdef configTOTAL_MPU_REGIONS
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+ #endif /* configTOTAL_MPU_REGIONS == 16. */
+#endif
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [r2] /* Read the value of MPU_CTRL. */
- orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- str r3, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ orr r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ str r3, [r0] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
- /* Pop the registers that are not automatically saved on exception entry. */
- ldmia r0!, {r3-r11, r14}
- msr control, r3
- /* Restore the task stack pointer. */
+ /*---------- Restore Context. ---------- */
+ ldr r3, =pxCurrentTCB
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ ldr r1, [r2] /* r1 = Location of saved context in TCB. */
+
+ ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
msr psp, r0
+ stmia r0, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */
+ msr control, r3
+ str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */
+
mov r0, #0
msr basepri, r0
- bx r14
+ bx lr
/*-----------------------------------------------------------*/
diff --git a/portable/IAR/ARM_CM4F_MPU/portmacro.h b/portable/IAR/ARM_CM4F_MPU/portmacro.h
index 96787e7..4bb8abc 100644
--- a/portable/IAR/ARM_CM4F_MPU/portmacro.h
+++ b/portable/IAR/ARM_CM4F_MPU/portmacro.h
@@ -195,9 +195,45 @@
uint32_t ulRegionAttribute;
} xMPU_REGION_REGISTERS;
+typedef struct MPU_REGION_SETTINGS
+{
+ uint32_t ulRegionStartAddress;
+ uint32_t ulRegionEndAddress;
+ uint32_t ulRegionPermissions;
+} xMPU_REGION_SETTINGS;
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+#define MAX_CONTEXT_SIZE 52
+
+/* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+#define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+#define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
typedef struct MPU_SETTINGS
{
xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
} xMPU_SETTINGS;
/* Architecture specifics. */
@@ -207,9 +243,12 @@
/*-----------------------------------------------------------*/
/* SVC numbers for various services. */
-#define portSVC_START_SCHEDULER 0
-#define portSVC_YIELD 1
-#define portSVC_RAISE_PRIVILEGE 2
+#define portSVC_START_SCHEDULER 0
+#define portSVC_YIELD 1
+#define portSVC_RAISE_PRIVILEGE 2
+#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 5
/* Scheduler utilities. */
@@ -348,6 +387,16 @@
#define portRESET_PRIVILEGE() vResetPrivilege()
/*-----------------------------------------------------------*/
+extern BaseType_t xPortIsTaskPrivileged( void );
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+#define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+/*-----------------------------------------------------------*/
+
#ifndef configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY
#warning "configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY is not defined. We recommend defining it to 1 in FreeRTOSConfig.h for better security. https://www.FreeRTOS.org/FreeRTOS-V10.3.x.html"
#define configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY 0
diff --git a/portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..f051a60
--- /dev/null
+++ b/portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1552 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+    push {r0}
+    /* Unlike the other wrappers, this function may also be called from an ISR
+     * (IPSR != 0), in which case the privileged path must be taken directly. */
+    mrs r0, ipsr
+    cmp r0, #0
+    bne MPU_xTimerGenericCommand_Priv
+    mrs r0, control
+    tst r0, #1
+    beq MPU_xTimerGenericCommand_Priv
+    MPU_xTimerGenericCommand_Unpriv:
+        pop {r0}
+        svc #portSVC_SYSTEM_CALL_ENTER_1
+        bl MPU_xTimerGenericCommandImpl
+        svc #portSVC_SYSTEM_CALL_EXIT
+        bx lr
+    MPU_xTimerGenericCommand_Priv:
+        pop {r0}
+        b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/IAR/ARM_CM55/non_secure/port.c b/portable/IAR/ARM_CM55/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/IAR/ARM_CM55/non_secure/port.c
+++ b/portable/IAR/ARM_CM55/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+        /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                    "   vpop {s0}         \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+        /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                    "   vpop {s0}         \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+        /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+        /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+        /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                    "   vpop {s0}         \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/IAR/ARM_CM55/non_secure/portasm.s b/portable/IAR/ARM_CM55/non_secure/portasm.s
index a193cd7..15e74ff 100644
--- a/portable/IAR/ARM_CM55/non_secure/portasm.s
+++ b/portable/IAR/ARM_CM55/non_secure/portasm.s
@@ -32,12 +32,21 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN xSecureContext
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
EXTERN SecureContext_SaveContext
EXTERN SecureContext_LoadContext
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -89,50 +98,81 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+    orr r2, #1                              /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+
+ restore_general_regs_first_task:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r3, [r2] /* Read pxCurrentTCB. */
ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r2] /* Program RNR = 4. */
- adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- ldr r5, =xSecureContext
- str r1, [r5] /* Set xSecureContext to this task's value for the same. */
- msr psplim, r2 /* Set this task's PSPLIM value. */
- msr control, r3 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r4 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
ldr r4, =xSecureContext
str r1, [r4] /* Set xSecureContext to this task's value for the same. */
@@ -145,6 +185,7 @@
mov r0, #0
msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
bx r3 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -183,6 +224,143 @@
bx lr /* Return. */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+ ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */
+
+ cbz r0, save_ns_context /* No secure context to save. */
+ save_s_context:
+ push {r0-r2, lr}
+ bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r0-r2, lr}
+
+ save_ns_context:
+ mov r3, lr /* r3 = LR (EXC_RETURN). */
+ lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ save_general_regs:
+ mrs r3, psp
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r2!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */
+ sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r2!, {r4-r11} /* Store r4-r11. */
+ ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r2!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psp /* r3 = PSP. */
+ mrs r4, psplim /* r4 = PSPLIM. */
+ mrs r5, control /* r5 = CONTROL. */
+ stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+ cbz r0, restore_ns_context /* No secure context to restore. */
+
+ restore_s_context:
+ push {r1-r3, lr}
+ bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+ pop {r1-r3, lr}
+
+ restore_ns_context:
+ mov r0, lr /* r0 = LR (EXC_RETURN). */
+ lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+ bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+ restore_general_regs:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
PendSV_Handler:
ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
@@ -200,20 +378,11 @@
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r3] /* Read pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
-#else /* configENABLE_MPU */
subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
mrs r1, psplim /* r1 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
-#endif /* configENABLE_MPU */
b select_next_task
save_ns_context:
@@ -224,17 +393,6 @@
it eq
vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- adds r2, r2, #16 /* r2 = r2 + 16. */
- stm r2, {r4-r11} /* Store the registers that are not saved automatically. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- subs r2, r2, #16 /* r2 = r2 - 16. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
adds r2, r2, #12 /* r2 = r2 + 12. */
@@ -243,7 +401,6 @@
mov r3, lr /* r3 = LR/EXC_RETURN. */
subs r2, r2, #12 /* r2 = r2 - 12. */
stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
select_next_task:
mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
@@ -258,51 +415,6 @@
ldr r1, [r3] /* Read pxCurrentTCB. */
ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
- #if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r3] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r3] /* Program MAIR0. */
- ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r3] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r3] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
-
- #if ( configENABLE_MPU == 1 )
- ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r3 /* Restore the CONTROL register value for the task. */
- mov lr, r4 /* LR = r4. */
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- str r0, [r3] /* Restore the task's xSecureContext. */
- cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- push {r2, r4}
- bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r2, r4}
- mov lr, r4 /* LR = r4. */
- lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- msr psp, r2 /* Remember the new top of stack for the task. */
- bx lr
- #else /* configENABLE_MPU */
ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
msr psplim, r1 /* Restore the PSPLIM register value for the task. */
mov lr, r4 /* LR = r4. */
@@ -319,7 +431,6 @@
bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
msr psp, r2 /* Remember the new top of stack for the task. */
bx lr
- #endif /* configENABLE_MPU */
restore_ns_context:
ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */
@@ -330,14 +441,50 @@
#endif /* configENABLE_FPU || configENABLE_MVE */
msr psp, r2 /* Remember the new top of stack for the task. */
bx lr
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq syscall_enter
+ cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq syscall_enter_1
+ cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_enter_1:
+ mov r1, lr
+ b vSystemCallEnter_1
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
vPortFreeSecureContext:
diff --git a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h
+++ b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..f051a60
--- /dev/null
+++ b/portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1552 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0}
+ /* This function can also be called from an ISR and, in that case, we
+ * need to take the privileged path. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c
+++ b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+    /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+    /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+    /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+    /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+        /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+    /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+    /* Declaration when these variables are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s
index 581b84d..ec52025 100644
--- a/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s
+++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s
@@ -32,9 +32,18 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -79,48 +88,79 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs_first_task:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r3, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r3, #4 /* r3 = 4. */
- str r3, [r2] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- msr psplim, r1 /* Set this task's PSPLIM value. */
- msr control, r2 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r3 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
@@ -131,6 +171,7 @@
mov r0, #0
msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
bx r2 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -169,6 +210,114 @@
bx lr /* Return. */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */
+ mrs r2, psp /* r2 = PSP. */
+
+ save_general_regs:
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */
+ tst lr, #0x10 /* Test Bit[4] of EXC_RETURN - 0 when the extended (FP) stack frame is in use. */
+ ittt eq /* The next 3 instructions execute only when the FP frame is in use. */
+ vstmiaeq r1!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context (s0-s15 and FPSCR) into s0-s16. */
+ vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */
+ sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r1!, {r4-r11} /* Store r4-r11. */
+ ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r1!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psplim /* r3 = PSPLIM. */
+ mrs r4, control /* r4 = CONTROL. */
+ stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext /* May update pxCurrentTCB to the next task to run. */
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 (ENABLE) in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4 i.e. select region 4 as the first of the alias-accessible regions. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 (ENABLE) in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location of saved context in TCB - restore walks backwards (ldmdb) from here. */
+
+ restore_special_regs:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2 /* Restore the task's PSP. */
+ msr psplim, r3 /* Restore the task's PSPLIM. */
+ msr control, r4 /* Restore the task's CONTROL (privilege level). */
+
+ restore_general_regs:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10 /* Test Bit[4] of EXC_RETURN - 0 when the extended (FP) stack frame is in use. */
+ ittt eq /* The next 3 instructions execute only when the FP frame is in use. */
+ vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context (s0-s15 and FPSCR). */
+ vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr /* Exception return - hardware restores the frame copied to the task stack above. */
+
+#else /* configENABLE_MPU */
+
PendSV_Handler:
mrs r0, psp /* Read PSP in r0. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@@ -176,16 +325,10 @@
it eq
vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
-#if ( configENABLE_MPU == 1 )
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r2, control /* r2 = CONTROL. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
-#else /* configENABLE_MPU */
+
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */
-#endif /* configENABLE_MPU */
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
@@ -203,37 +346,7 @@
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r3, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r3, #4 /* r3 = 4. */
- str r3, [r2] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
-#else /* configENABLE_MPU */
ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
-#endif /* configENABLE_MPU */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
@@ -241,22 +354,53 @@
vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r2 /* Restore the CONTROL register value for the task. */
-#else /* configENABLE_MPU */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
-#endif /* configENABLE_MPU */
msr psp, r0 /* Remember the new top of stack for the task. */
bx r3
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4 /* Test Bit[2] of EXC_RETURN - 0 when the exception frame is on MSP. */
+ ite eq
+ mrseq r0, msp /* r0 = MSP, the stack holding the hardware saved exception frame. */
+ mrsne r0, psp /* r0 = PSP otherwise. */
+
+ ldr r1, [r0, #24] /* r1 = stacked PC i.e. the return address after the SVC instruction. */
+ ldrb r2, [r1, #-2] /* r2 = SVC number, the imm8 encoded in the SVC instruction itself. */
+ cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq syscall_enter
+ cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq syscall_enter_1
+ cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C /* All other SVC numbers are handled by the generic C handler. */
+
+ syscall_enter:
+ mov r1, lr /* r1 = EXC_RETURN, second parameter to vSystemCallEnter. */
+ b vSystemCallEnter
+
+ syscall_enter_1:
+ mov r1, lr /* r1 = EXC_RETURN, second parameter to vSystemCallEnter_1. */
+ b vSystemCallEnter_1
+
+ syscall_exit:
+ mov r1, lr /* r1 = EXC_RETURN, second parameter to vSystemCallExit. */
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+SVC_Handler:
+ tst lr, #4 /* Test Bit[2] of EXC_RETURN - 0 when the exception frame is on MSP. */
+ ite eq
+ mrseq r0, msp /* r0 = MSP, the stack holding the hardware saved exception frame. */
+ mrsne r0, psp /* r0 = PSP otherwise. */
+ b vPortSVCHandler_C /* All SVCs are handled by the generic C handler. */
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
END
diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h
+++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief Per-task information needed to execute system calls on a
+ * separate, privileged-only stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; /**< Memory used as the privileged stack while a system call executes. */
+ uint32_t * pulSystemCallStack; /**< Pointer into ulSystemCallStackBuffer used as the system call stack pointer. */
+ uint32_t * pulSystemCallStackLimit; /**< Stack limit (PSPLIM value) for the system call stack. */
+ uint32_t * pulTaskStack; /**< Task stack pointer saved at system call entry - presumably restored on exit; verify against vSystemCallExit. */
+ uint32_t ulLinkRegisterAtSystemCallEntry; /**< LR value captured at system call entry. */
+ uint32_t ulStackLimitRegisterAtSystemCallEntry; /**< PSPLIM value captured at system call entry. */
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ]; /**< Task context saved here (in the TCB) by the scheduler, instead of on the task's stack. */
+ uint32_t ulTaskFlags; /**< Bitmap of portSTACK_FRAME_HAS_PADDING_FLAG and portTASK_IS_PRIVILEGED_FLAG. */
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; /**< Privileged-only stack information for this task's system calls. */
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..f051a60
--- /dev/null
+++ b/portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1552 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil:
+ push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+ mrs r0, control
+ tst r0, #1 /* Test Bit[0] (nPRIV) of CONTROL - 0 when running privileged. */
+ bne MPU_xTaskDelayUntil_Unpriv
+ MPU_xTaskDelayUntil_Priv:
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl /* Already privileged - tail-call the implementation directly. */
+ MPU_xTaskDelayUntil_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack (up to 4 parameters). */
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+ MPU_xTaskAbortDelay_Priv:
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+ MPU_xTaskAbortDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+ MPU_vTaskDelay_Priv:
+ pop {r0}
+ b MPU_vTaskDelayImpl
+ MPU_vTaskDelay_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+ MPU_uxTaskPriorityGet_Priv:
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+ MPU_uxTaskPriorityGet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+ MPU_eTaskGetState_Priv:
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+ MPU_eTaskGetState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+ MPU_vTaskGetInfo_Priv:
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+ MPU_vTaskGetInfo_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+ MPU_xTaskGetIdleTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+ MPU_xTaskGetIdleTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+ MPU_vTaskSuspend_Priv:
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+ MPU_vTaskSuspend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskResume
+MPU_vTaskResume:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+ MPU_vTaskResume_Priv:
+ pop {r0}
+ b MPU_vTaskResumeImpl
+ MPU_vTaskResume_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+ MPU_xTaskGetTickCount_Priv:
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+ MPU_xTaskGetTickCount_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+ MPU_uxTaskGetNumberOfTasks_Priv:
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+ MPU_uxTaskGetNumberOfTasks_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+ MPU_pcTaskGetName_Priv:
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+ MPU_pcTaskGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+ MPU_ulTaskGetRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+ MPU_ulTaskGetRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+ MPU_ulTaskGetRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+ MPU_ulTaskGetRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ MPU_ulTaskGetIdleRunTimePercent_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+ MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ MPU_ulTaskGetIdleRunTimeCounter_Priv:
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+ MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+ MPU_vTaskSetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+ MPU_vTaskSetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+ MPU_xTaskGetApplicationTaskTag_Priv:
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+ MPU_xTaskGetApplicationTaskTag_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ MPU_vTaskSetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+ MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+ MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetSystemState_Unpriv
+ MPU_uxTaskGetSystemState_Priv:
+ pop {r0}
+ b MPU_uxTaskGetSystemStateImpl
+ MPU_uxTaskGetSystemState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetSystemStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+ MPU_uxTaskGetStackHighWaterMark_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+ MPU_uxTaskGetStackHighWaterMark_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMarkImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+ MPU_uxTaskGetStackHighWaterMark2_Priv:
+ pop {r0}
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+ MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetStackHighWaterMark2Impl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+ MPU_xTaskGetCurrentTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTaskGetCurrentTaskHandleImpl
+ MPU_xTaskGetCurrentTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetCurrentTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetSchedulerState_Unpriv
+ MPU_xTaskGetSchedulerState_Priv:
+ pop {r0}
+ b MPU_xTaskGetSchedulerStateImpl
+ MPU_xTaskGetSchedulerState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetSchedulerStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+ mrs r0, control
+ tst r0, #1 /* Test Bit[0] (nPRIV) of CONTROL - 0 when running privileged. */
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl /* Already privileged - tail-call the implementation directly. */
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1 /* Raise privilege - ENTER_1 variant because this system call takes 5 parameters. */
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0}
+ /* This function can be called from ISR also and therefore, we need a check
+ * to take privileged path, if called from ISR. */
+ mrs r0, ipsr /* IPSR != 0 means Handler mode, i.e. called from an ISR. */
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control /* CONTROL.nPRIV (bit 0): 0 = privileged, 1 = unprivileged. */
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv /* nPRIV clear - already privileged, skip the SVC. */
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1 /* _1 variant: system call with a 5th parameter on the stack. */
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege, restore the task stack. */
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandImpl /* Privileged/ISR path - tail-call the impl directly. */
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/IAR/ARM_CM85/non_secure/port.c b/portable/IAR/ARM_CM85/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/IAR/ARM_CM85/non_secure/port.c
+++ b/portable/IAR/ARM_CM85/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? Both macro
+ * parameters are fully parenthesized in the expansion: without parentheses the
+ * trailing comparison would mis-parse for OR'd requests, because '=='
+ * binds tighter than '|' ( x == a | b parses as ( x == a ) | b ). */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0; /* Default: no access permissions granted. */
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ else if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) /* AP values are mutually exclusive - no need to re-test after a match. */
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Store the value of the LR and PSPLIM registers before the SVC was raised.
+ * We need to restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variables are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+ " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/IAR/ARM_CM85/non_secure/portasm.s b/portable/IAR/ARM_CM85/non_secure/portasm.s
index a193cd7..15e74ff 100644
--- a/portable/IAR/ARM_CM85/non_secure/portasm.s
+++ b/portable/IAR/ARM_CM85/non_secure/portasm.s
@@ -32,12 +32,21 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN xSecureContext
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
EXTERN SecureContext_SaveContext
EXTERN SecureContext_LoadContext
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -89,50 +98,81 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r3] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r1, [r3] /* r1 = pxCurrentTCB. */
+ ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+ msr psp, r3
+ msr psplim, r4
+ msr control, r5
+ ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+ str r0, [r4] /* Restore xSecureContext. */
+
+ restore_general_regs_first_task:
+ ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r3, [r2] /* Read pxCurrentTCB. */
ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
- ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r2] /* Program RNR = 4. */
- adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
- ldr r5, =xSecureContext
- str r1, [r5] /* Set xSecureContext to this task's value for the same. */
- msr psplim, r2 /* Set this task's PSPLIM value. */
- msr control, r3 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r4 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
ldr r4, =xSecureContext
str r1, [r4] /* Set xSecureContext to this task's value for the same. */
@@ -145,6 +185,7 @@
mov r0, #0
msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
bx r3 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -183,6 +224,143 @@
bx lr /* Return. */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+    ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+    ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
+    ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+    ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
+    ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */
+
+    cbz r0, save_ns_context /* No secure context to save. */
+    save_s_context:
+        push {r0-r2, lr}
+        bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+        pop {r0-r2, lr}
+
+    save_ns_context:
+        mov r3, lr /* r3 = LR (EXC_RETURN). */
+        lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+        bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+    save_general_regs:
+        mrs r3, psp /* r3 = PSP i.e. the task's non-secure stack pointer. */
+
+    #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+        add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */
+        tst lr, #0x10 /* Bit[4] of EXC_RETURN is 0 when the stack frame contains FP context. */
+        ittt eq
+        vstmiaeq r2!, {s16-s31} /* Store s16-s31. */
+        vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+        vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */
+        sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */
+    #endif /* configENABLE_FPU || configENABLE_MVE */
+
+        stmia r2!, {r4-r11} /* Store r4-r11. */
+        ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+        stmia r2!, {r4-r11} /* Store the hardware saved context. */
+
+    save_special_regs:
+        mrs r3, psp /* r3 = PSP. */
+        mrs r4, psplim /* r4 = PSPLIM. */
+        mrs r5, control /* r5 = CONTROL. */
+        stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+        str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */
+
+    select_next_task:
+        mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+        msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+        dsb
+        isb
+        bl vTaskSwitchContext
+        mov r0, #0 /* r0 = 0. */
+        msr basepri, r0 /* Enable interrupts. */
+
+    program_mpu:
+        ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+        ldr r0, [r3] /* r0 = pxCurrentTCB.*/
+
+        dmb /* Complete outstanding transfers before disabling MPU. */
+        ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+        ldr r2, [r1] /* Read the value of MPU_CTRL. */
+        bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+        str r2, [r1] /* Disable MPU. */
+
+        adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+        ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+        ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+        str r1, [r2] /* Program MAIR0. */
+
+        adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+        ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+        ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+        movs r3, #4 /* r3 = 4. */
+        str r3, [r1] /* Program RNR = 4. */
+        ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+    #if ( configTOTAL_MPU_REGIONS == 16 )
+        movs r3, #8 /* r3 = 8. */
+        str r3, [r1] /* Program RNR = 8. */
+        ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+        movs r3, #12 /* r3 = 12. */
+        str r3, [r1] /* Program RNR = 12. */
+        ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+        stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+    #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+        ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+        ldr r2, [r1] /* Read the value of MPU_CTRL. */
+        orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+        str r2, [r1] /* Enable MPU. */
+        dsb /* Force memory writes before continuing. */
+
+    restore_context:
+        ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+        ldr r1, [r3] /* r1 = pxCurrentTCB.*/
+        ldr r2, [r1] /* r2 = Location of saved context in TCB. */
+
+    restore_special_regs:
+        ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */
+        msr psp, r3
+        msr psplim, r4
+        msr control, r5
+        ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
+        str r0, [r4] /* Restore xSecureContext. */
+        cbz r0, restore_ns_context /* No secure context to restore. */
+
+    restore_s_context:
+        push {r1-r3, lr}
+        bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
+        pop {r1-r3, lr}
+
+    restore_ns_context:
+        mov r0, lr /* r0 = LR (EXC_RETURN). */
+        lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
+        bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */
+
+    restore_general_regs:
+        ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */
+        stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+        ldmdb r2!, {r4-r11} /* r4-r11 restored. */
+
+    #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+        tst lr, #0x10 /* Bit[4] of EXC_RETURN is 0 when the stack frame contains FP context. */
+        ittt eq
+        vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+        vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+        vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */
+    #endif /* configENABLE_FPU || configENABLE_MVE */
+
+    restore_context_done:
+        str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */
+        bx lr
+
+#else /* configENABLE_MPU */
+
PendSV_Handler:
ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
@@ -200,20 +378,11 @@
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r3] /* Read pxCurrentTCB. */
-#if ( configENABLE_MPU == 1 )
- subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
-#else /* configENABLE_MPU */
subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
mrs r1, psplim /* r1 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
-#endif /* configENABLE_MPU */
b select_next_task
save_ns_context:
@@ -224,17 +393,6 @@
it eq
vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
- str r2, [r1] /* Save the new top of stack in TCB. */
- adds r2, r2, #16 /* r2 = r2 + 16. */
- stm r2, {r4-r11} /* Store the registers that are not saved automatically. */
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r3, control /* r3 = CONTROL. */
- mov r4, lr /* r4 = LR/EXC_RETURN. */
- subs r2, r2, #16 /* r2 = r2 - 16. */
- stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
- #else /* configENABLE_MPU */
subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
str r2, [r1] /* Save the new top of stack in TCB. */
adds r2, r2, #12 /* r2 = r2 + 12. */
@@ -243,7 +401,6 @@
mov r3, lr /* r3 = LR/EXC_RETURN. */
subs r2, r2, #12 /* r2 = r2 - 12. */
stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
- #endif /* configENABLE_MPU */
select_next_task:
mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
@@ -258,51 +415,6 @@
ldr r1, [r3] /* Read pxCurrentTCB. */
ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
- #if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r3] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
- ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */
- str r4, [r3] /* Program MAIR0. */
- ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */
- movs r4, #4 /* r4 = 4. */
- str r4, [r3] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r3] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r3] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
- #endif /* configENABLE_MPU */
-
- #if ( configENABLE_MPU == 1 )
- ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r3 /* Restore the CONTROL register value for the task. */
- mov lr, r4 /* LR = r4. */
- ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
- str r0, [r3] /* Restore the task's xSecureContext. */
- cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
- ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
- ldr r1, [r3] /* Read pxCurrentTCB. */
- push {r2, r4}
- bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
- pop {r2, r4}
- mov lr, r4 /* LR = r4. */
- lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
- bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
- msr psp, r2 /* Remember the new top of stack for the task. */
- bx lr
- #else /* configENABLE_MPU */
ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
msr psplim, r1 /* Restore the PSPLIM register value for the task. */
mov lr, r4 /* LR = r4. */
@@ -319,7 +431,6 @@
bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
msr psp, r2 /* Remember the new top of stack for the task. */
bx lr
- #endif /* configENABLE_MPU */
restore_ns_context:
ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */
@@ -330,14 +441,50 @@
#endif /* configENABLE_FPU || configENABLE_MVE */
msr psp, r2 /* Remember the new top of stack for the task. */
bx lr
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+    tst lr, #4 /* Bit 2 of EXC_RETURN is 0 when MSP was used to stack the exception frame. */
+    ite eq
+    mrseq r0, msp /* r0 = MSP - handler/main stack was in use. */
+    mrsne r0, psp /* r0 = PSP - task stack was in use. */
+
+    ldr r1, [r0, #24] /* r1 = stacked PC i.e. the return address after the SVC instruction. */
+    ldrb r2, [r1, #-2] /* r2 = SVC number - the immediate byte of the SVC instruction preceding the stacked PC. */
+    cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+    beq syscall_enter
+    cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+    beq syscall_enter_1
+    cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+    beq syscall_exit
+    b vPortSVCHandler_C /* All other SVC numbers are handled in C. r0 = stack frame. */
+
+    syscall_enter:
+        mov r1, lr /* r1 = EXC_RETURN - second parameter to vSystemCallEnter. */
+        b vSystemCallEnter
+
+    syscall_enter_1:
+        mov r1, lr /* r1 = EXC_RETURN - second parameter to vSystemCallEnter_1. */
+        b vSystemCallEnter_1
+
+    syscall_exit:
+        mov r1, lr /* r1 = EXC_RETURN - second parameter to vSystemCallExit. */
+        b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
vPortFreeSecureContext:
diff --git a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h
+++ b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+    extern BaseType_t xPortIsTaskPrivileged( void ); /* Implementation provided by the port layer (not visible in this header). */
+
+    /**
+     * @brief Checks whether or not the calling task is privileged.
+     *
+     * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+     */
+    #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S
new file mode 100644
index 0000000..f051a60
--- /dev/null
+++ b/portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S
@@ -0,0 +1,1552 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+
+ SECTION freertos_system_calls:CODE:NOROOT(2)
+ THUMB
+/*-----------------------------------------------------------*/
+
+#include "FreeRTOSConfig.h"
+
+#ifndef configUSE_MPU_WRAPPERS_V1
+    #define configUSE_MPU_WRAPPERS_V1 0 /* Default to the v2 wrappers when the application does not choose. */
+#endif
+
+/* These must be in sync with portmacro.h. */
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+    PUBLIC MPU_xTaskDelayUntil
+MPU_xTaskDelayUntil: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_xTaskDelayUntil_Unpriv
+    MPU_xTaskDelayUntil_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_xTaskDelayUntilImpl /* Tail-call the implementation directly. */
+    MPU_xTaskDelayUntil_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_xTaskDelayUntilImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xTaskAbortDelay
+MPU_xTaskAbortDelay: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_xTaskAbortDelay_Unpriv
+    MPU_xTaskAbortDelay_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_xTaskAbortDelayImpl /* Tail-call the implementation directly. */
+    MPU_xTaskAbortDelay_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_xTaskAbortDelayImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_vTaskDelay
+MPU_vTaskDelay: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_vTaskDelay_Unpriv
+    MPU_vTaskDelay_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_vTaskDelayImpl /* Tail-call the implementation directly. */
+    MPU_vTaskDelay_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_vTaskDelayImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_uxTaskPriorityGet
+MPU_uxTaskPriorityGet: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_uxTaskPriorityGet_Unpriv
+    MPU_uxTaskPriorityGet_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_uxTaskPriorityGetImpl /* Tail-call the implementation directly. */
+    MPU_uxTaskPriorityGet_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_uxTaskPriorityGetImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_eTaskGetState
+MPU_eTaskGetState: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_eTaskGetState_Unpriv
+    MPU_eTaskGetState_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_eTaskGetStateImpl /* Tail-call the implementation directly. */
+    MPU_eTaskGetState_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_eTaskGetStateImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_vTaskGetInfo
+MPU_vTaskGetInfo: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_vTaskGetInfo_Unpriv
+    MPU_vTaskGetInfo_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_vTaskGetInfoImpl /* Tail-call the implementation directly. */
+    MPU_vTaskGetInfo_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_vTaskGetInfoImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xTaskGetIdleTaskHandle
+MPU_xTaskGetIdleTaskHandle: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_xTaskGetIdleTaskHandle_Unpriv
+    MPU_xTaskGetIdleTaskHandle_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_xTaskGetIdleTaskHandleImpl /* Tail-call the implementation directly. */
+    MPU_xTaskGetIdleTaskHandle_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_xTaskGetIdleTaskHandleImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_vTaskSuspend
+MPU_vTaskSuspend: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_vTaskSuspend_Unpriv
+    MPU_vTaskSuspend_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_vTaskSuspendImpl /* Tail-call the implementation directly. */
+    MPU_vTaskSuspend_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_vTaskSuspendImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_vTaskResume
+MPU_vTaskResume: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_vTaskResume_Unpriv
+    MPU_vTaskResume_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_vTaskResumeImpl /* Tail-call the implementation directly. */
+    MPU_vTaskResume_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_vTaskResumeImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xTaskGetTickCount
+MPU_xTaskGetTickCount: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_xTaskGetTickCount_Unpriv
+    MPU_xTaskGetTickCount_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_xTaskGetTickCountImpl /* Tail-call the implementation directly. */
+    MPU_xTaskGetTickCount_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_xTaskGetTickCountImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_uxTaskGetNumberOfTasks
+MPU_uxTaskGetNumberOfTasks: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_uxTaskGetNumberOfTasks_Unpriv
+    MPU_uxTaskGetNumberOfTasks_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_uxTaskGetNumberOfTasksImpl /* Tail-call the implementation directly. */
+    MPU_uxTaskGetNumberOfTasks_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_uxTaskGetNumberOfTasksImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_pcTaskGetName
+MPU_pcTaskGetName: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_pcTaskGetName_Unpriv
+    MPU_pcTaskGetName_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_pcTaskGetNameImpl /* Tail-call the implementation directly. */
+    MPU_pcTaskGetName_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_pcTaskGetNameImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_ulTaskGetRunTimeCounter
+MPU_ulTaskGetRunTimeCounter: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_ulTaskGetRunTimeCounter_Unpriv
+    MPU_ulTaskGetRunTimeCounter_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_ulTaskGetRunTimeCounterImpl /* Tail-call the implementation directly. */
+    MPU_ulTaskGetRunTimeCounter_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_ulTaskGetRunTimeCounterImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_ulTaskGetRunTimePercent
+MPU_ulTaskGetRunTimePercent: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_ulTaskGetRunTimePercent_Unpriv
+    MPU_ulTaskGetRunTimePercent_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_ulTaskGetRunTimePercentImpl /* Tail-call the implementation directly. */
+    MPU_ulTaskGetRunTimePercent_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_ulTaskGetRunTimePercentImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_ulTaskGetIdleRunTimePercent
+MPU_ulTaskGetIdleRunTimePercent: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+    MPU_ulTaskGetIdleRunTimePercent_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_ulTaskGetIdleRunTimePercentImpl /* Tail-call the implementation directly. */
+    MPU_ulTaskGetIdleRunTimePercent_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_ulTaskGetIdleRunTimePercentImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_ulTaskGetIdleRunTimeCounter
+MPU_ulTaskGetIdleRunTimeCounter: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+    MPU_ulTaskGetIdleRunTimeCounter_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_ulTaskGetIdleRunTimeCounterImpl /* Tail-call the implementation directly. */
+    MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_ulTaskGetIdleRunTimeCounterImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_vTaskSetApplicationTaskTag
+MPU_vTaskSetApplicationTaskTag: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_vTaskSetApplicationTaskTag_Unpriv
+    MPU_vTaskSetApplicationTaskTag_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_vTaskSetApplicationTaskTagImpl /* Tail-call the implementation directly. */
+    MPU_vTaskSetApplicationTaskTag_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_vTaskSetApplicationTaskTagImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xTaskGetApplicationTaskTag
+MPU_xTaskGetApplicationTaskTag: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_xTaskGetApplicationTaskTag_Unpriv
+    MPU_xTaskGetApplicationTaskTag_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_xTaskGetApplicationTaskTagImpl /* Tail-call the implementation directly. */
+    MPU_xTaskGetApplicationTaskTag_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_xTaskGetApplicationTaskTagImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_vTaskSetThreadLocalStoragePointer
+MPU_vTaskSetThreadLocalStoragePointer: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+    MPU_vTaskSetThreadLocalStoragePointer_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_vTaskSetThreadLocalStoragePointerImpl /* Tail-call the implementation directly. */
+    MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_vTaskSetThreadLocalStoragePointerImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
+MPU_pvTaskGetThreadLocalStoragePointer: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+    MPU_pvTaskGetThreadLocalStoragePointer_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_pvTaskGetThreadLocalStoragePointerImpl /* Tail-call the implementation directly. */
+    MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_uxTaskGetSystemState
+MPU_uxTaskGetSystemState: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_uxTaskGetSystemState_Unpriv
+    MPU_uxTaskGetSystemState_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_uxTaskGetSystemStateImpl /* Tail-call the implementation directly. */
+    MPU_uxTaskGetSystemState_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_uxTaskGetSystemStateImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_uxTaskGetStackHighWaterMark
+MPU_uxTaskGetStackHighWaterMark: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+    MPU_uxTaskGetStackHighWaterMark_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_uxTaskGetStackHighWaterMarkImpl /* Tail-call the implementation directly. */
+    MPU_uxTaskGetStackHighWaterMark_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_uxTaskGetStackHighWaterMarkImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_uxTaskGetStackHighWaterMark2
+MPU_uxTaskGetStackHighWaterMark2: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+    MPU_uxTaskGetStackHighWaterMark2_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_uxTaskGetStackHighWaterMark2Impl /* Tail-call the implementation directly. */
+    MPU_uxTaskGetStackHighWaterMark2_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_uxTaskGetStackHighWaterMark2Impl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xTaskGetCurrentTaskHandle
+MPU_xTaskGetCurrentTaskHandle: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+    MPU_xTaskGetCurrentTaskHandle_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_xTaskGetCurrentTaskHandleImpl /* Tail-call the implementation directly. */
+    MPU_xTaskGetCurrentTaskHandle_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_xTaskGetCurrentTaskHandleImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+    PUBLIC MPU_xTaskGetSchedulerState
+MPU_xTaskGetSchedulerState: /* If the caller is privileged, branch to the implementation; otherwise enter it via an SVC system call. */
+    push {r0} /* Preserve r0 (first argument) while CONTROL is examined. */
+    mrs r0, control /* r0 = CONTROL. Bit 0 (nPRIV) is 1 for an unprivileged caller. */
+    tst r0, #1
+    bne MPU_xTaskGetSchedulerState_Unpriv
+    MPU_xTaskGetSchedulerState_Priv:
+        pop {r0} /* Restore the argument. */
+        b MPU_xTaskGetSchedulerStateImpl /* Tail-call the implementation directly. */
+    MPU_xTaskGetSchedulerState_Unpriv:
+        pop {r0} /* Restore the argument. */
+        svc #portSVC_SYSTEM_CALL_ENTER /* Raise privilege and switch to the system call stack. */
+        bl MPU_xTaskGetSchedulerStateImpl
+        svc #portSVC_SYSTEM_CALL_EXIT /* Drop privilege and switch back to the task stack. */
+        bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTaskSetTimeOutState
+MPU_vTaskSetTimeOutState:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetTimeOutState_Unpriv
+ MPU_vTaskSetTimeOutState_Priv:
+ pop {r0}
+ b MPU_vTaskSetTimeOutStateImpl
+ MPU_vTaskSetTimeOutState_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetTimeOutStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskCheckForTimeOut
+MPU_xTaskCheckForTimeOut:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskCheckForTimeOut_Unpriv
+ MPU_xTaskCheckForTimeOut_Priv:
+ pop {r0}
+ b MPU_xTaskCheckForTimeOutImpl
+ MPU_xTaskCheckForTimeOut_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskCheckForTimeOutImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotify
+MPU_xTaskGenericNotify:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotify_Unpriv
+ MPU_xTaskGenericNotify_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyImpl
+ MPU_xTaskGenericNotify_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyWait
+MPU_xTaskGenericNotifyWait:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyWait_Unpriv
+ MPU_xTaskGenericNotifyWait_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyWaitImpl
+ MPU_xTaskGenericNotifyWait_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTaskGenericNotifyWaitImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyTake
+MPU_ulTaskGenericNotifyTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyTake_Unpriv
+ MPU_ulTaskGenericNotifyTake_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyTakeImpl
+ MPU_ulTaskGenericNotifyTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTaskGenericNotifyStateClear
+MPU_xTaskGenericNotifyStateClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGenericNotifyStateClear_Unpriv
+ MPU_xTaskGenericNotifyStateClear_Priv:
+ pop {r0}
+ b MPU_xTaskGenericNotifyStateClearImpl
+ MPU_xTaskGenericNotifyStateClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGenericNotifyStateClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_ulTaskGenericNotifyValueClear
+MPU_ulTaskGenericNotifyValueClear:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+ MPU_ulTaskGenericNotifyValueClear_Priv:
+ pop {r0}
+ b MPU_ulTaskGenericNotifyValueClearImpl
+ MPU_ulTaskGenericNotifyValueClear_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGenericNotifyValueClearImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGenericSend
+MPU_xQueueGenericSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGenericSend_Unpriv
+ MPU_xQueueGenericSend_Priv:
+ pop {r0}
+ b MPU_xQueueGenericSendImpl
+ MPU_xQueueGenericSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGenericSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueMessagesWaiting
+MPU_uxQueueMessagesWaiting:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueMessagesWaiting_Unpriv
+ MPU_uxQueueMessagesWaiting_Priv:
+ pop {r0}
+ b MPU_uxQueueMessagesWaitingImpl
+ MPU_uxQueueMessagesWaiting_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueMessagesWaitingImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxQueueSpacesAvailable
+MPU_uxQueueSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxQueueSpacesAvailable_Unpriv
+ MPU_uxQueueSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_uxQueueSpacesAvailableImpl
+ MPU_uxQueueSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxQueueSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueReceive
+MPU_xQueueReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueReceive_Unpriv
+ MPU_xQueueReceive_Priv:
+ pop {r0}
+ b MPU_xQueueReceiveImpl
+ MPU_xQueueReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueuePeek
+MPU_xQueuePeek:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueuePeek_Unpriv
+ MPU_xQueuePeek_Priv:
+ pop {r0}
+ b MPU_xQueuePeekImpl
+ MPU_xQueuePeek_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueuePeekImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSemaphoreTake
+MPU_xQueueSemaphoreTake:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSemaphoreTake_Unpriv
+ MPU_xQueueSemaphoreTake_Priv:
+ pop {r0}
+ b MPU_xQueueSemaphoreTakeImpl
+ MPU_xQueueSemaphoreTake_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSemaphoreTakeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGetMutexHolder
+MPU_xQueueGetMutexHolder:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGetMutexHolder_Unpriv
+ MPU_xQueueGetMutexHolder_Priv:
+ pop {r0}
+ b MPU_xQueueGetMutexHolderImpl
+ MPU_xQueueGetMutexHolder_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGetMutexHolderImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueTakeMutexRecursive
+MPU_xQueueTakeMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueTakeMutexRecursive_Unpriv
+ MPU_xQueueTakeMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueTakeMutexRecursiveImpl
+ MPU_xQueueTakeMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueTakeMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueGiveMutexRecursive
+MPU_xQueueGiveMutexRecursive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueGiveMutexRecursive_Unpriv
+ MPU_xQueueGiveMutexRecursive_Priv:
+ pop {r0}
+ b MPU_xQueueGiveMutexRecursiveImpl
+ MPU_xQueueGiveMutexRecursive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueGiveMutexRecursiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueSelectFromSet
+MPU_xQueueSelectFromSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueSelectFromSet_Unpriv
+ MPU_xQueueSelectFromSet_Priv:
+ pop {r0}
+ b MPU_xQueueSelectFromSetImpl
+ MPU_xQueueSelectFromSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueSelectFromSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xQueueAddToSet
+MPU_xQueueAddToSet:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xQueueAddToSet_Unpriv
+ MPU_xQueueAddToSet_Priv:
+ pop {r0}
+ b MPU_xQueueAddToSetImpl
+ MPU_xQueueAddToSet_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xQueueAddToSetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueAddToRegistry
+MPU_vQueueAddToRegistry:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueAddToRegistry_Unpriv
+ MPU_vQueueAddToRegistry_Priv:
+ pop {r0}
+ b MPU_vQueueAddToRegistryImpl
+ MPU_vQueueAddToRegistry_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueAddToRegistryImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vQueueUnregisterQueue
+MPU_vQueueUnregisterQueue:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vQueueUnregisterQueue_Unpriv
+ MPU_vQueueUnregisterQueue_Priv:
+ pop {r0}
+ b MPU_vQueueUnregisterQueueImpl
+ MPU_vQueueUnregisterQueue_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vQueueUnregisterQueueImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcQueueGetName
+MPU_pcQueueGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcQueueGetName_Unpriv
+ MPU_pcQueueGetName_Priv:
+ pop {r0}
+ b MPU_pcQueueGetNameImpl
+ MPU_pcQueueGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcQueueGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pvTimerGetTimerID
+MPU_pvTimerGetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTimerGetTimerID_Unpriv
+ MPU_pvTimerGetTimerID_Priv:
+ pop {r0}
+ b MPU_pvTimerGetTimerIDImpl
+ MPU_pvTimerGetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTimerGetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetTimerID
+MPU_vTimerSetTimerID:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetTimerID_Unpriv
+ MPU_vTimerSetTimerID_Priv:
+ pop {r0}
+ b MPU_vTimerSetTimerIDImpl
+ MPU_vTimerSetTimerID_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetTimerIDImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerIsTimerActive
+MPU_xTimerIsTimerActive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerIsTimerActive_Unpriv
+ MPU_xTimerIsTimerActive_Priv:
+ pop {r0}
+ b MPU_xTimerIsTimerActiveImpl
+ MPU_xTimerIsTimerActive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerIsTimerActiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
+MPU_xTimerGetTimerDaemonTaskHandle:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+ MPU_xTimerGetTimerDaemonTaskHandle_Priv:
+ pop {r0}
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+ MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGenericCommand
+MPU_xTimerGenericCommand:
+ push {r0}
+ /* This function can also be called from an ISR, in which case the
+ * privileged path must be taken. */
+ mrs r0, ipsr
+ cmp r0, #0
+ bne MPU_xTimerGenericCommand_Priv
+ mrs r0, control
+ tst r0, #1
+ beq MPU_xTimerGenericCommand_Priv
+ MPU_xTimerGenericCommand_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xTimerGenericCommandImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+ MPU_xTimerGenericCommand_Priv:
+ pop {r0}
+ b MPU_xTimerGenericCommandImpl
+
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_pcTimerGetName
+MPU_pcTimerGetName:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTimerGetName_Unpriv
+ MPU_pcTimerGetName_Priv:
+ pop {r0}
+ b MPU_pcTimerGetNameImpl
+ MPU_pcTimerGetName_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTimerGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vTimerSetReloadMode
+MPU_vTimerSetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTimerSetReloadMode_Unpriv
+ MPU_vTimerSetReloadMode_Priv:
+ pop {r0}
+ b MPU_vTimerSetReloadModeImpl
+ MPU_vTimerSetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTimerSetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetReloadMode
+MPU_xTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetReloadMode_Unpriv
+ MPU_xTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_xTimerGetReloadModeImpl
+ MPU_xTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxTimerGetReloadMode
+MPU_uxTimerGetReloadMode:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTimerGetReloadMode_Unpriv
+ MPU_uxTimerGetReloadMode_Priv:
+ pop {r0}
+ b MPU_uxTimerGetReloadModeImpl
+ MPU_uxTimerGetReloadMode_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTimerGetReloadModeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetPeriod
+MPU_xTimerGetPeriod:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetPeriod_Unpriv
+ MPU_xTimerGetPeriod_Priv:
+ pop {r0}
+ b MPU_xTimerGetPeriodImpl
+ MPU_xTimerGetPeriod_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetPeriodImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xTimerGetExpiryTime
+MPU_xTimerGetExpiryTime:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTimerGetExpiryTime_Unpriv
+ MPU_xTimerGetExpiryTime_Priv:
+ pop {r0}
+ b MPU_xTimerGetExpiryTimeImpl
+ MPU_xTimerGetExpiryTime_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTimerGetExpiryTimeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupWaitBits
+MPU_xEventGroupWaitBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupWaitBits_Unpriv
+ MPU_xEventGroupWaitBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupWaitBitsImpl
+ MPU_xEventGroupWaitBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER_1
+ bl MPU_xEventGroupWaitBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupClearBits
+MPU_xEventGroupClearBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupClearBits_Unpriv
+ MPU_xEventGroupClearBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupClearBitsImpl
+ MPU_xEventGroupClearBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupClearBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSetBits
+MPU_xEventGroupSetBits:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSetBits_Unpriv
+ MPU_xEventGroupSetBits_Priv:
+ pop {r0}
+ b MPU_xEventGroupSetBitsImpl
+ MPU_xEventGroupSetBits_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSetBitsImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xEventGroupSync
+MPU_xEventGroupSync:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xEventGroupSync_Unpriv
+ MPU_xEventGroupSync_Priv:
+ pop {r0}
+ b MPU_xEventGroupSyncImpl
+ MPU_xEventGroupSync_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xEventGroupSyncImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_uxEventGroupGetNumber
+MPU_uxEventGroupGetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxEventGroupGetNumber_Unpriv
+ MPU_uxEventGroupGetNumber_Priv:
+ pop {r0}
+ b MPU_uxEventGroupGetNumberImpl
+ MPU_uxEventGroupGetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxEventGroupGetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_vEventGroupSetNumber
+MPU_vEventGroupSetNumber:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vEventGroupSetNumber_Unpriv
+ MPU_vEventGroupSetNumber_Priv:
+ pop {r0}
+ b MPU_vEventGroupSetNumberImpl
+ MPU_vEventGroupSetNumber_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vEventGroupSetNumberImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSend
+MPU_xStreamBufferSend:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSend_Unpriv
+ MPU_xStreamBufferSend_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSendImpl
+ MPU_xStreamBufferSend_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferReceive
+MPU_xStreamBufferReceive:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+ MPU_xStreamBufferReceive_Priv:
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+ MPU_xStreamBufferReceive_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsFull
+MPU_xStreamBufferIsFull:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+ MPU_xStreamBufferIsFull_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+ MPU_xStreamBufferIsFull_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferIsEmpty
+MPU_xStreamBufferIsEmpty:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+ MPU_xStreamBufferIsEmpty_Priv:
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+ MPU_xStreamBufferIsEmpty_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSpacesAvailable
+MPU_xStreamBufferSpacesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+ MPU_xStreamBufferSpacesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+ MPU_xStreamBufferSpacesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferBytesAvailable
+MPU_xStreamBufferBytesAvailable:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+ MPU_xStreamBufferBytesAvailable_Priv:
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+ MPU_xStreamBufferBytesAvailable_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferSetTriggerLevel
+MPU_xStreamBufferSetTriggerLevel:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+ MPU_xStreamBufferSetTriggerLevel_Priv:
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+ MPU_xStreamBufferSetTriggerLevel_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+ PUBLIC MPU_xStreamBufferNextMessageLengthBytes
+MPU_xStreamBufferNextMessageLengthBytes:
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ MPU_xStreamBufferNextMessageLengthBytes_Priv:
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+ MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+/*-----------------------------------------------------------*/
+
+/* Default weak implementations in case one is not available from
+ * mpu_wrappers because of config options. */
+
+ PUBWEAK MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntilImpl:
+ b MPU_xTaskDelayUntilImpl
+
+ PUBWEAK MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelayImpl:
+ b MPU_xTaskAbortDelayImpl
+
+ PUBWEAK MPU_vTaskDelayImpl
+MPU_vTaskDelayImpl:
+ b MPU_vTaskDelayImpl
+
+ PUBWEAK MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGetImpl:
+ b MPU_uxTaskPriorityGetImpl
+
+ PUBWEAK MPU_eTaskGetStateImpl
+MPU_eTaskGetStateImpl:
+ b MPU_eTaskGetStateImpl
+
+ PUBWEAK MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfoImpl:
+ b MPU_vTaskGetInfoImpl
+
+ PUBWEAK MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandleImpl:
+ b MPU_xTaskGetIdleTaskHandleImpl
+
+ PUBWEAK MPU_vTaskSuspendImpl
+MPU_vTaskSuspendImpl:
+ b MPU_vTaskSuspendImpl
+
+ PUBWEAK MPU_vTaskResumeImpl
+MPU_vTaskResumeImpl:
+ b MPU_vTaskResumeImpl
+
+ PUBWEAK MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCountImpl:
+ b MPU_xTaskGetTickCountImpl
+
+ PUBWEAK MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasksImpl:
+ b MPU_uxTaskGetNumberOfTasksImpl
+
+ PUBWEAK MPU_pcTaskGetNameImpl
+MPU_pcTaskGetNameImpl:
+ b MPU_pcTaskGetNameImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounterImpl:
+ b MPU_ulTaskGetRunTimeCounterImpl
+
+ PUBWEAK MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercentImpl:
+ b MPU_ulTaskGetRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercentImpl:
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+
+ PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounterImpl:
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ PUBWEAK MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTagImpl:
+ b MPU_vTaskSetApplicationTaskTagImpl
+
+ PUBWEAK MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTagImpl:
+ b MPU_xTaskGetApplicationTaskTagImpl
+
+ PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointerImpl:
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointerImpl:
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ PUBWEAK MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemStateImpl:
+ b MPU_uxTaskGetSystemStateImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMarkImpl:
+ b MPU_uxTaskGetStackHighWaterMarkImpl
+
+ PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2Impl:
+ b MPU_uxTaskGetStackHighWaterMark2Impl
+
+ PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandleImpl:
+ b MPU_xTaskGetCurrentTaskHandleImpl
+
+ PUBWEAK MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerStateImpl:
+ b MPU_xTaskGetSchedulerStateImpl
+
+ PUBWEAK MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutStateImpl:
+ b MPU_vTaskSetTimeOutStateImpl
+
+ PUBWEAK MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOutImpl:
+ b MPU_xTaskCheckForTimeOutImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotifyImpl:
+ b MPU_xTaskGenericNotifyImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWaitImpl:
+ b MPU_xTaskGenericNotifyWaitImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTakeImpl:
+ b MPU_ulTaskGenericNotifyTakeImpl
+
+ PUBWEAK MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClearImpl:
+ b MPU_xTaskGenericNotifyStateClearImpl
+
+ PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClearImpl:
+ b MPU_ulTaskGenericNotifyValueClearImpl
+
+ PUBWEAK MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSendImpl:
+ b MPU_xQueueGenericSendImpl
+
+ PUBWEAK MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaitingImpl:
+ b MPU_uxQueueMessagesWaitingImpl
+
+ PUBWEAK MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailableImpl:
+ b MPU_uxQueueSpacesAvailableImpl
+
+ PUBWEAK MPU_xQueueReceiveImpl
+MPU_xQueueReceiveImpl:
+ b MPU_xQueueReceiveImpl
+
+ PUBWEAK MPU_xQueuePeekImpl
+MPU_xQueuePeekImpl:
+ b MPU_xQueuePeekImpl
+
+ PUBWEAK MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTakeImpl:
+ b MPU_xQueueSemaphoreTakeImpl
+
+ PUBWEAK MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolderImpl:
+ b MPU_xQueueGetMutexHolderImpl
+
+ PUBWEAK MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursiveImpl:
+ b MPU_xQueueTakeMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursiveImpl:
+ b MPU_xQueueGiveMutexRecursiveImpl
+
+ PUBWEAK MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSetImpl:
+ b MPU_xQueueSelectFromSetImpl
+
+ PUBWEAK MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSetImpl:
+ b MPU_xQueueAddToSetImpl
+
+ PUBWEAK MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistryImpl:
+ b MPU_vQueueAddToRegistryImpl
+
+ PUBWEAK MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueueImpl:
+ b MPU_vQueueUnregisterQueueImpl
+
+ PUBWEAK MPU_pcQueueGetNameImpl
+MPU_pcQueueGetNameImpl:
+ b MPU_pcQueueGetNameImpl
+
+ PUBWEAK MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerIDImpl:
+ b MPU_pvTimerGetTimerIDImpl
+
+ PUBWEAK MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerIDImpl:
+ b MPU_vTimerSetTimerIDImpl
+
+ PUBWEAK MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActiveImpl:
+ b MPU_xTimerIsTimerActiveImpl
+
+ PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandleImpl:
+ b MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+ PUBWEAK MPU_xTimerGenericCommandImpl
+MPU_xTimerGenericCommandImpl:
+ b MPU_xTimerGenericCommandImpl
+
+ PUBWEAK MPU_pcTimerGetNameImpl
+MPU_pcTimerGetNameImpl:
+ b MPU_pcTimerGetNameImpl
+
+ PUBWEAK MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadModeImpl:
+ b MPU_vTimerSetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadModeImpl:
+ b MPU_xTimerGetReloadModeImpl
+
+ PUBWEAK MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadModeImpl:
+ b MPU_uxTimerGetReloadModeImpl
+
+ PUBWEAK MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriodImpl:
+ b MPU_xTimerGetPeriodImpl
+
+ PUBWEAK MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTimeImpl:
+ b MPU_xTimerGetExpiryTimeImpl
+
+ PUBWEAK MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBitsImpl:
+ b MPU_xEventGroupWaitBitsImpl
+
+ PUBWEAK MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBitsImpl:
+ b MPU_xEventGroupClearBitsImpl
+
+ PUBWEAK MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBitsImpl:
+ b MPU_xEventGroupSetBitsImpl
+
+ PUBWEAK MPU_xEventGroupSyncImpl
+MPU_xEventGroupSyncImpl:
+ b MPU_xEventGroupSyncImpl
+
+ PUBWEAK MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumberImpl:
+ b MPU_uxEventGroupGetNumberImpl
+
+ PUBWEAK MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumberImpl:
+ b MPU_vEventGroupSetNumberImpl
+
+ PUBWEAK MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSendImpl:
+ b MPU_xStreamBufferSendImpl
+
+ PUBWEAK MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceiveImpl:
+ b MPU_xStreamBufferReceiveImpl
+
+ PUBWEAK MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFullImpl:
+ b MPU_xStreamBufferIsFullImpl
+
+ PUBWEAK MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmptyImpl:
+ b MPU_xStreamBufferIsEmptyImpl
+
+ PUBWEAK MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailableImpl:
+ b MPU_xStreamBufferSpacesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailableImpl:
+ b MPU_xStreamBufferBytesAvailableImpl
+
+ PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevelImpl:
+ b MPU_xStreamBufferSetTriggerLevelImpl
+
+ PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytesImpl:
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+
+/*-----------------------------------------------------------*/
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ END
diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c
index 88c4504..cab1b36 100644
--- a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c
+++ b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c
@@ -108,6 +108,13 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Constants used during system call enter and exit.
+ */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the FPU.
*/
#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */
@@ -124,6 +131,14 @@
/*-----------------------------------------------------------*/
/**
+ * @brief Offsets in the stack to the parameters when inside the SVC handler.
+ */
+#define portOFFSET_TO_LR ( 5 )
+#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
+/*-----------------------------------------------------------*/
+
+/**
* @brief Constants required to manipulate the MPU.
*/
#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
@@ -148,6 +163,8 @@
#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */
+#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL )
+
#define portMPU_MAIR_ATTR0_POS ( 0UL )
#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff )
@@ -191,6 +208,30 @@
/* Expected value of the portMPU_TYPE register. */
#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL )
+
+/* Extract first address of the MPU region as encoded in the
+ * RBAR (Region Base Address Register) value. */
+#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
+ ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
+
+/* Extract last address of the MPU region as encoded in the
+ * RLAR (Region Limit Address Register) value. */
+#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
+ ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
+
+/* Does addr lie within the [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
/*-----------------------------------------------------------*/
/**
@@ -312,6 +353,19 @@
#if ( configENABLE_MPU == 1 )
/**
+ * @brief Extract MPU region's access permissions from the Region Base Address
+ * Register (RBAR) value.
+ *
+ * @param ulRBARValue RBAR value for the MPU region.
+ *
+ * @return uint32_t Access permissions.
+ */
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
+#endif /* configENABLE_MPU */
+
+#if ( configENABLE_MPU == 1 )
+
+/**
* @brief Setup the Memory Protection Unit (MPU).
*/
static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
@@ -365,6 +419,60 @@
* @brief C part of SVC handler.
*/
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configENABLE_MPU == 1 )
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -682,6 +790,26 @@
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
+ static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
+ {
+ uint32_t ulAccessPermissions = 0;
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
+ {
+ ulAccessPermissions = tskMPU_READ_PERMISSION;
+ }
+
+ if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
+ {
+ ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
+
+ return ulAccessPermissions;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
+#if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
@@ -853,7 +981,7 @@
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
- #if ( configENABLE_MPU == 1 )
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
@@ -865,7 +993,7 @@
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* defined( __ARMCC_VERSION ) */
- #endif /* configENABLE_MPU */
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
uint32_t ulPC;
@@ -880,7 +1008,7 @@
/* Register are stored on the stack in the following order - R0, R1, R2, R3,
* R12, LR, PC, xPSR. */
- ulPC = pulCallerStackAddress[ 6 ];
+ ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
switch( ucSVCNumber )
@@ -951,18 +1079,18 @@
vRestoreContextOfFirstTask();
break;
- #if ( configENABLE_MPU == 1 )
- case portSVC_RAISE_PRIVILEGE:
+ #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
+ case portSVC_RAISE_PRIVILEGE:
- /* Only raise the privilege, if the svc was raised from any of
- * the system calls. */
- if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
- ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
- {
- vRaisePrivilege();
- }
- break;
- #endif /* configENABLE_MPU */
+ /* Only raise the privilege, if the svc was raised from any of
+ * the system calls. */
+ if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ vRaisePrivilege();
+ }
+ break;
+ #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default:
/* Incorrect SVC call. */
@@ -971,51 +1099,455 @@
}
/*-----------------------------------------------------------*/
-/* *INDENT-OFF* */
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+        /* Store the values of the LR and PSPLIM registers before the SVC was raised. We need to
+         * restore them when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+        /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+        /* Store the values of the LR and PSPLIM registers before the SVC was raised.
+         * We need to restore them when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+ __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " bics r0, r1 \n" /* Clear nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i;
+ #if defined( __ARMCC_VERSION )
+ /* Declaration when these variable are defined in code instead of being
+ * exported from linker scripts. */
+ extern uint32_t * __syscalls_flash_start__;
+ extern uint32_t * __syscalls_flash_end__;
+ #else
+ /* Declaration when these variable are exported from linker scripts. */
+ extern uint32_t __syscalls_flash_start__[];
+ extern uint32_t __syscalls_flash_end__[];
+ #endif /* #if defined( __ARMCC_VERSION ) */
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ {
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ __asm volatile (
+ " vpush {s0} \n" /* Trigger lazy stacking. */
+                    " vpop {s0} \n" /* Nullify the effect of the above instruction. */
+ ::: "memory"
+ );
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+ }
+ #else
+ {
+ ulStackFrameSize = 8;
+ }
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
+
+ /* Restore the LR and PSPLIM to what they were at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+ __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm volatile (
+ " mrs r0, control \n" /* Obtain current control value. */
+ " movs r1, #1 \n" /* r1 = 1. */
+ " orrs r0, r1 \n" /* Set nPRIV bit. */
+ " msr control, r0 \n" /* Write back new control value. */
+ ::: "r0", "r1", "memory"
+ );
+ }
+}
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
#if ( configENABLE_MPU == 1 )
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters,
- BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */
-#else
- StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
- StackType_t * pxEndOfStack,
- TaskFunction_t pxCode,
- void * pvParameters ) /* PRIVILEGED_FUNCTION */
-#endif /* configENABLE_MPU */
-/* *INDENT-ON* */
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
+#if( configENABLE_MPU == 1 )
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters,
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
+{
+ uint32_t ulIndex = 0;
+
+ xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
+ ulIndex++;
+
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
+ ulIndex++;
+
+ #if ( configENABLE_TRUSTZONE == 1 )
+ {
+ xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
+ ulIndex++;
+ }
+ #endif /* configENABLE_TRUSTZONE */
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ ulIndex++;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
+ ulIndex++;
+ if( xRunPrivileged == pdTRUE )
+ {
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ else
+ {
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
+ ulIndex++;
+ }
+ xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
+ ulIndex++;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
+ ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ return &( xMPUSettings->ulContext[ ulIndex ] );
+}
+
+#else /* configENABLE_MPU */
+
+StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
+ StackType_t * pxEndOfStack,
+ TaskFunction_t pxCode,
+ void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
*pxTopOfStack = portINITIAL_EXC_RETURN;
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1029,55 +1561,39 @@
#else /* portPRELOAD_REGISTERS */
{
pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
+ *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
+ *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
+ *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */
+ *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */
+ *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */
+ *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */
+ *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
+ *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */
+ *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */
+ *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */
+ *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */
+ *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */
+ *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */
+ *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */
+ *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
pxTopOfStack--;
- *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */
+ *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */
-
- #if ( configENABLE_MPU == 1 )
- {
- pxTopOfStack--;
-
- if( xRunPrivileged == pdTRUE )
- {
- *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- else
- {
- *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */
- }
- }
- #endif /* configENABLE_MPU */
-
+ *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
@@ -1092,6 +1608,8 @@
return pxTopOfStack;
}
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
@@ -1347,6 +1865,54 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+ BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+ {
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
+ {
+ /* Is the MPU region enabled? */
+ if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
+ portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
+ portIS_AUTHORIZED( ulAccessRequested,
+ prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+ }
+#endif /* configENABLE_MPU */
+/*-----------------------------------------------------------*/
+
BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s
index 581b84d..ec52025 100644
--- a/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s
+++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s
@@ -32,9 +32,18 @@
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
+#ifndef configUSE_MPU_WRAPPERS_V1
+ #define configUSE_MPU_WRAPPERS_V1 0
+#endif
+
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+ EXTERN vSystemCallEnter
+ EXTERN vSystemCallEnter_1
+ EXTERN vSystemCallExit
+#endif
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
@@ -79,48 +88,79 @@
THUMB
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+vRestoreContextOfFirstTask:
+ program_mpu_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context_first_task:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs_first_task:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs_first_task:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+
+ restore_context_done_first_task:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ mov r0, #0
+ msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r3, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r3, #4 /* r3 = 4. */
- str r3, [r2] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
- msr psplim, r1 /* Set this task's PSPLIM value. */
- msr control, r2 /* Set this task's CONTROL value. */
- adds r0, #32 /* Discard everything up to r0. */
- msr psp, r0 /* This is now the new top of stack to use in the task. */
- isb
- mov r0, #0
- msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
- bx r3 /* Finally, branch to EXC_RETURN. */
-#else /* configENABLE_MPU */
ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
@@ -131,6 +171,7 @@
mov r0, #0
msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */
bx r2 /* Finally, branch to EXC_RETURN. */
+
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -169,6 +210,114 @@
bx lr /* Return. */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+PendSV_Handler:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+ ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */
+ mrs r2, psp /* r2 = PSP. */
+
+ save_general_regs:
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r1!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */
+ sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ stmia r1!, {r4-r11} /* Store r4-r11. */
+ ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */
+ stmia r1!, {r4-r11} /* Store the hardware saved context. */
+
+ save_special_regs:
+ mrs r3, psplim /* r3 = PSPLIM. */
+ mrs r4, control /* r4 = CONTROL. */
+ stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */
+ str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */
+
+ select_next_task:
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+ msr basepri, r0 /* Disable interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY. */
+ dsb
+ isb
+ bl vTaskSwitchContext
+ mov r0, #0 /* r0 = 0. */
+ msr basepri, r0 /* Enable interrupts. */
+
+ program_mpu:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB. */
+
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */
+ str r2, [r1] /* Disable MPU. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */
+ ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */
+ ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
+ str r1, [r2] /* Program MAIR0. */
+
+ adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */
+ ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */
+ ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
+
+ movs r3, #4 /* r3 = 4. */
+ str r3, [r1] /* Program RNR = 4. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+
+ #if ( configTOTAL_MPU_REGIONS == 16 )
+ movs r3, #8 /* r3 = 8. */
+ str r3, [r1] /* Program RNR = 8. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ movs r3, #12 /* r3 = 12. */
+ str r3, [r1] /* Program RNR = 12. */
+ ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
+ stmia r2, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
+ #endif /* configTOTAL_MPU_REGIONS == 16 */
+
+ ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */
+ ldr r2, [r1] /* Read the value of MPU_CTRL. */
+ orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */
+ str r2, [r1] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ restore_context:
+ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
+ ldr r0, [r2] /* r0 = pxCurrentTCB.*/
+ ldr r1, [r0] /* r1 = Location of saved context in TCB. */
+
+ restore_special_regs:
+ ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */
+ msr psp, r2
+ msr psplim, r3
+ msr control, r4
+
+ restore_general_regs:
+ ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */
+ stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r4-r11} /* r4-r11 restored. */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */
+ #endif /* configENABLE_FPU || configENABLE_MVE */
+
+ restore_context_done:
+ str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
+
+#else /* configENABLE_MPU */
+
PendSV_Handler:
mrs r0, psp /* Read PSP in r0. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@@ -176,16 +325,10 @@
it eq
vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
-#if ( configENABLE_MPU == 1 )
- mrs r1, psplim /* r1 = PSPLIM. */
- mrs r2, control /* r2 = CONTROL. */
- mov r3, lr /* r3 = LR/EXC_RETURN. */
- stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
-#else /* configENABLE_MPU */
+
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */
-#endif /* configENABLE_MPU */
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
@@ -203,37 +346,7 @@
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
-#if ( configENABLE_MPU == 1 )
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
- str r4, [r2] /* Disable MPU. */
-
- adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
- ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
- ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
- str r3, [r2] /* Program MAIR0. */
- ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
- movs r3, #4 /* r3 = 4. */
- str r3, [r2] /* Program RNR = 4. */
- adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
- ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
- ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
- stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
-
- ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
- ldr r4, [r2] /* Read the value of MPU_CTRL. */
- orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
- str r4, [r2] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
-#endif /* configENABLE_MPU */
-
-#if ( configENABLE_MPU == 1 )
- ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
-#else /* configENABLE_MPU */
ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
-#endif /* configENABLE_MPU */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
@@ -241,22 +354,53 @@
vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
- #if ( configENABLE_MPU == 1 )
- msr psplim, r1 /* Restore the PSPLIM register value for the task. */
- msr control, r2 /* Restore the CONTROL register value for the task. */
-#else /* configENABLE_MPU */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
-#endif /* configENABLE_MPU */
msr psp, r0 /* Remember the new top of stack for the task. */
bx r3
+
+#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
+
+SVC_Handler:
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
+ beq syscall_enter
+ cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
+ beq syscall_enter_1
+ cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
+ beq syscall_exit
+ b vPortSVCHandler_C
+
+ syscall_enter:
+ mov r1, lr
+ b vSystemCallEnter
+
+ syscall_enter_1:
+ mov r1, lr
+ b vSystemCallEnter_1
+
+ syscall_exit:
+ mov r1, lr
+ b vSystemCallExit
+
+#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
+
+#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
END
diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h
index c2ca5fa..65ac109 100644
--- a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h
+++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h
@@ -186,23 +186,120 @@
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
-/**
- * @brief Settings to define an MPU region.
- */
-typedef struct MPURegionSettings
-{
- uint32_t ulRBAR; /**< RBAR for the region. */
- uint32_t ulRLAR; /**< RLAR for the region. */
-} MPURegionSettings_t;
+#if ( configENABLE_MPU == 1 )
-/**
- * @brief MPU settings as stored in the TCB.
- */
-typedef struct MPU_SETTINGS
-{
- uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
- MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
-} xMPU_SETTINGS;
+ /**
+ * @brief Settings to define an MPU region.
+ */
+ typedef struct MPURegionSettings
+ {
+ uint32_t ulRBAR; /**< RBAR for the region. */
+ uint32_t ulRLAR; /**< RLAR for the region. */
+ } MPURegionSettings_t;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ /**
+ * @brief System call stack.
+ */
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulSystemCallStackLimit;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ uint32_t ulStackLimitRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+ #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
+
+ /**
+ * @brief MPU settings as stored in the TCB.
+ */
+ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+------------------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><-----------------------------><---->
+ * 16 16 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 54
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | | | PC, xPSR | EXC_RETURN | |
+ * +-----------+---------------+----------+-----------------+----------------------+-----+
+ *
+ * <-----------><--------------><---------><----------------><---------------------><---->
+ * 16 16 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 53
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ #if( configENABLE_TRUSTZONE == 1 )
+
+ /*
+ * +----------+-----------------+------------------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | |
+ * | | PC, xPSR | CONTROL, EXC_RETURN | |
+ * +----------+-----------------+------------------------------+-----+
+ *
+ * <---------><----------------><------------------------------><---->
+ * 8 8 5 1
+ */
+ #define MAX_CONTEXT_SIZE 22
+
+ #else /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ /*
+ * +----------+-----------------+----------------------+-----+
+ * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | |
+ * | | PC, xPSR | EXC_RETURN | |
+ * +----------+-----------------+----------------------+-----+
+ *
+ * <---------><----------------><----------------------><---->
+ * 8 8 4 1
+ */
+ #define MAX_CONTEXT_SIZE 21
+
+ #endif /* #if( configENABLE_TRUSTZONE == 1 ) */
+
+ #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
+
+ /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+ #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+ #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+ typedef struct MPU_SETTINGS
+ {
+ uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
+ MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
+ } xMPU_SETTINGS;
+
+#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
/**
@@ -223,6 +320,9 @@
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
+#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
/**
@@ -315,6 +415,20 @@
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
+#if ( configENABLE_MPU == 1 )
+
+ extern BaseType_t xPortIsTaskPrivileged( void );
+
+ /**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+ #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#endif /* configENABLE_MPU == 1 */
+/*-----------------------------------------------------------*/
+
/**
* @brief Barriers.
*/
diff --git a/portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c b/portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c
new file mode 100644
index 0000000..aa1e825
--- /dev/null
+++ b/portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c
@@ -0,0 +1,1993 @@
+/*
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+ * all the API functions to use the MPU wrappers. That should only be done when
+ * task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+#include "timers.h"
+#include "event_groups.h"
+#include "stream_buffer.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+#if ( INCLUDE_xTaskDelayUntil == 1 )
+
+BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskDelayUntilImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskDelayUntil_Unpriv
+MPU_xTaskDelayUntil_Priv
+ pop {r0}
+ b MPU_xTaskDelayUntilImpl
+MPU_xTaskDelayUntil_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskDelayUntilImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskAbortDelayImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskAbortDelay_Unpriv
+MPU_xTaskAbortDelay_Priv
+ pop {r0}
+ b MPU_xTaskAbortDelayImpl
+MPU_xTaskAbortDelay_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskAbortDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskDelayImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskDelay_Unpriv
+MPU_vTaskDelay_Priv
+ pop {r0}
+ b MPU_vTaskDelayImpl
+MPU_vTaskDelay_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskDelayImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( INCLUDE_vTaskDelay == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_uxTaskPriorityGetImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskPriorityGet_Unpriv
+MPU_uxTaskPriorityGet_Priv
+ pop {r0}
+ b MPU_uxTaskPriorityGetImpl
+MPU_uxTaskPriorityGet_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskPriorityGetImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_eTaskGetState == 1 )
+
+eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_eTaskGetStateImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_eTaskGetState_Unpriv
+MPU_eTaskGetState_Priv
+ pop {r0}
+ b MPU_eTaskGetStateImpl
+MPU_eTaskGetState_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_eTaskGetStateImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( INCLUDE_eTaskGetState == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskGetInfo( TaskHandle_t xTask,
+ TaskStatus_t * pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskGetInfoImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskGetInfo_Unpriv
+MPU_vTaskGetInfo_Priv
+ pop {r0}
+ b MPU_vTaskGetInfoImpl
+MPU_vTaskGetInfo_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskGetInfoImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) FREERTOS_SYSTEM_CALL;
+
+__asm TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskGetIdleTaskHandleImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetIdleTaskHandle_Unpriv
+MPU_xTaskGetIdleTaskHandle_Priv
+ pop {r0}
+ b MPU_xTaskGetIdleTaskHandleImpl
+MPU_xTaskGetIdleTaskHandle_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetIdleTaskHandleImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskSuspendImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSuspend_Unpriv
+MPU_vTaskSuspend_Priv
+ pop {r0}
+ b MPU_vTaskSuspendImpl
+MPU_vTaskSuspend_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSuspendImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+void MPU_vTaskResume( TaskHandle_t xTaskToResume ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskResumeImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskResume_Unpriv
+MPU_vTaskResume_Priv
+ pop {r0}
+ b MPU_vTaskResumeImpl
+MPU_vTaskResume_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskResumeImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
+/*-----------------------------------------------------------*/
+
+TickType_t MPU_xTaskGetTickCount( void ) FREERTOS_SYSTEM_CALL;
+
+__asm TickType_t MPU_xTaskGetTickCount( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskGetTickCountImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetTickCount_Unpriv
+MPU_xTaskGetTickCount_Priv
+ pop {r0}
+ b MPU_xTaskGetTickCountImpl
+MPU_xTaskGetTickCount_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetTickCountImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) FREERTOS_SYSTEM_CALL;
+
+__asm UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_uxTaskGetNumberOfTasksImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_uxTaskGetNumberOfTasks_Unpriv
+MPU_uxTaskGetNumberOfTasks_Priv
+ pop {r0}
+ b MPU_uxTaskGetNumberOfTasksImpl
+MPU_uxTaskGetNumberOfTasks_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_uxTaskGetNumberOfTasksImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+/*-----------------------------------------------------------*/
+
+char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) FREERTOS_SYSTEM_CALL;
+
+__asm char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_pcTaskGetNameImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pcTaskGetName_Unpriv
+MPU_pcTaskGetName_Priv
+ pop {r0}
+ b MPU_pcTaskGetNameImpl
+MPU_pcTaskGetName_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pcTaskGetNameImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_ulTaskGetRunTimeCounterImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimeCounter_Unpriv
+MPU_ulTaskGetRunTimeCounter_Priv
+ pop {r0}
+ b MPU_ulTaskGetRunTimeCounterImpl
+MPU_ulTaskGetRunTimeCounter_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configGENERATE_RUN_TIME_STATS == 1 )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_ulTaskGetRunTimePercentImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetRunTimePercent_Unpriv
+MPU_ulTaskGetRunTimePercent_Priv
+ pop {r0}
+ b MPU_ulTaskGetRunTimePercentImpl
+MPU_ulTaskGetRunTimePercent_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) FREERTOS_SYSTEM_CALL;
+
+__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_ulTaskGetIdleRunTimePercentImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimePercent_Unpriv
+MPU_ulTaskGetIdleRunTimePercent_Priv
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimePercentImpl
+MPU_ulTaskGetIdleRunTimePercent_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimePercentImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) FREERTOS_SYSTEM_CALL;
+
+__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_ulTaskGetIdleRunTimeCounterImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+MPU_ulTaskGetIdleRunTimeCounter_Priv
+ pop {r0}
+ b MPU_ulTaskGetIdleRunTimeCounterImpl
+MPU_ulTaskGetIdleRunTimeCounter_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_ulTaskGetIdleRunTimeCounterImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskSetApplicationTaskTagImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetApplicationTaskTag_Unpriv
+MPU_vTaskSetApplicationTaskTag_Priv
+ pop {r0}
+ b MPU_vTaskSetApplicationTaskTagImpl
+MPU_vTaskSetApplicationTaskTag_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+__asm TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xTaskGetApplicationTaskTagImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xTaskGetApplicationTaskTag_Unpriv
+MPU_xTaskGetApplicationTaskTag_Priv
+ pop {r0}
+ b MPU_xTaskGetApplicationTaskTagImpl
+MPU_xTaskGetApplicationTaskTag_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xTaskGetApplicationTaskTagImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) FREERTOS_SYSTEM_CALL;
+
+__asm void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void * pvValue ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_vTaskSetThreadLocalStoragePointerImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+MPU_vTaskSetThreadLocalStoragePointer_Priv
+ pop {r0}
+ b MPU_vTaskSetThreadLocalStoragePointerImpl
+MPU_vTaskSetThreadLocalStoragePointer_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_vTaskSetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
+
+void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) FREERTOS_SYSTEM_CALL;
+
+__asm void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_pvTaskGetThreadLocalStoragePointerImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+MPU_pvTaskGetThreadLocalStoragePointer_Priv
+ pop {r0}
+ b MPU_pvTaskGetThreadLocalStoragePointerImpl
+MPU_pvTaskGetThreadLocalStoragePointer_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_pvTaskGetThreadLocalStoragePointerImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+
+#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+/* System-call wrapper for uxTaskGetSystemState. */
+UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+                                      const UBaseType_t uxArraySize,
+                                      configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) FREERTOS_SYSTEM_CALL;
+
+/* Privileged callers (CONTROL.nPRIV clear) branch straight to the kernel
+ * implementation; unprivileged callers raise SVC to gain privilege first. */
+__asm UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
+                                            const UBaseType_t uxArraySize,
+                                            configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_uxTaskGetSystemStateImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_uxTaskGetSystemState_Unpriv
+MPU_uxTaskGetSystemState_Priv
+    pop {r0}
+    b MPU_uxTaskGetSystemStateImpl
+MPU_uxTaskGetSystemState_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_uxTaskGetSystemStateImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
+
+/* System-call wrapper for uxTaskGetStackHighWaterMark. */
+UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_uxTaskGetStackHighWaterMarkImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_uxTaskGetStackHighWaterMark_Unpriv
+MPU_uxTaskGetStackHighWaterMark_Priv
+    pop {r0}
+    b MPU_uxTaskGetStackHighWaterMarkImpl
+MPU_uxTaskGetStackHighWaterMark_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_uxTaskGetStackHighWaterMarkImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
+
+/* System-call wrapper for uxTaskGetStackHighWaterMark2. */
+configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_uxTaskGetStackHighWaterMark2Impl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_uxTaskGetStackHighWaterMark2_Unpriv
+MPU_uxTaskGetStackHighWaterMark2_Priv
+    pop {r0}
+    b MPU_uxTaskGetStackHighWaterMark2Impl
+MPU_uxTaskGetStackHighWaterMark2_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_uxTaskGetStackHighWaterMark2Impl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
+
+/* System-call wrapper for xTaskGetCurrentTaskHandle. */
+TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit.  r0 is pushed only to free a
+ * scratch register for the CONTROL read. */
+__asm TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xTaskGetCurrentTaskHandleImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xTaskGetCurrentTaskHandle_Unpriv
+MPU_xTaskGetCurrentTaskHandle_Priv
+    pop {r0}
+    b MPU_xTaskGetCurrentTaskHandleImpl
+MPU_xTaskGetCurrentTaskHandle_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xTaskGetCurrentTaskHandleImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetSchedulerState == 1 )
+
+/* System-call wrapper for xTaskGetSchedulerState. */
+BaseType_t MPU_xTaskGetSchedulerState( void ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm BaseType_t MPU_xTaskGetSchedulerState( void ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xTaskGetSchedulerStateImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xTaskGetSchedulerState_Unpriv
+MPU_xTaskGetSchedulerState_Priv
+    pop {r0}
+    b MPU_xTaskGetSchedulerStateImpl
+MPU_xTaskGetSchedulerState_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xTaskGetSchedulerStateImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for vTaskSetTimeOutState. */
+void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_vTaskSetTimeOutStateImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_vTaskSetTimeOutState_Unpriv
+MPU_vTaskSetTimeOutState_Priv
+    pop {r0}
+    b MPU_vTaskSetTimeOutStateImpl
+MPU_vTaskSetTimeOutState_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_vTaskSetTimeOutStateImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xTaskCheckForTimeOut. */
+BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+                                     TickType_t * const pxTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
+                                           TickType_t * const pxTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xTaskCheckForTimeOutImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xTaskCheckForTimeOut_Unpriv
+MPU_xTaskCheckForTimeOut_Priv
+    pop {r0}
+    b MPU_xTaskCheckForTimeOutImpl
+MPU_xTaskCheckForTimeOut_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xTaskCheckForTimeOutImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* System-call wrapper for xTaskGenericNotify. */
+BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+                                   UBaseType_t uxIndexToNotify,
+                                   uint32_t ulValue,
+                                   eNotifyAction eAction,
+                                   uint32_t * pulPreviousNotificationValue ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV as in the other wrappers.  Note the unprivileged
+ * path uses portSVC_SYSTEM_CALL_ENTER_1 rather than _ENTER — presumably
+ * because this API has five parameters, so one argument is passed on the
+ * stack; confirm against the SVC handler's argument-copy logic. */
+__asm BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
+                                         UBaseType_t uxIndexToNotify,
+                                         uint32_t ulValue,
+                                         eNotifyAction eAction,
+                                         uint32_t * pulPreviousNotificationValue ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xTaskGenericNotifyImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xTaskGenericNotify_Unpriv
+MPU_xTaskGenericNotify_Priv
+    pop {r0}
+    b MPU_xTaskGenericNotifyImpl
+MPU_xTaskGenericNotify_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER_1
+    bl MPU_xTaskGenericNotifyImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* System-call wrapper for xTaskGenericNotifyWait. */
+BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+                                       uint32_t ulBitsToClearOnEntry,
+                                       uint32_t ulBitsToClearOnExit,
+                                       uint32_t * pulNotificationValue,
+                                       TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV.  Uses portSVC_SYSTEM_CALL_ENTER_1 — presumably
+ * because this five-parameter API passes its fifth argument on the stack;
+ * confirm against the SVC handler. */
+__asm BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
+                                             uint32_t ulBitsToClearOnEntry,
+                                             uint32_t ulBitsToClearOnExit,
+                                             uint32_t * pulNotificationValue,
+                                             TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xTaskGenericNotifyWaitImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xTaskGenericNotifyWait_Unpriv
+MPU_xTaskGenericNotifyWait_Priv
+    pop {r0}
+    b MPU_xTaskGenericNotifyWaitImpl
+MPU_xTaskGenericNotifyWait_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER_1
+    bl MPU_xTaskGenericNotifyWaitImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* System-call wrapper for ulTaskGenericNotifyTake. */
+uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+                                      BaseType_t xClearCountOnExit,
+                                      TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
+                                            BaseType_t xClearCountOnExit,
+                                            TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_ulTaskGenericNotifyTakeImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_ulTaskGenericNotifyTake_Unpriv
+MPU_ulTaskGenericNotifyTake_Priv
+    pop {r0}
+    b MPU_ulTaskGenericNotifyTakeImpl
+MPU_ulTaskGenericNotifyTake_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_ulTaskGenericNotifyTakeImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* System-call wrapper for xTaskGenericNotifyStateClear. */
+BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+                                             UBaseType_t uxIndexToClear ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask,
+                                                   UBaseType_t uxIndexToClear ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xTaskGenericNotifyStateClearImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xTaskGenericNotifyStateClear_Unpriv
+MPU_xTaskGenericNotifyStateClear_Priv
+    pop {r0}
+    b MPU_xTaskGenericNotifyStateClearImpl
+MPU_xTaskGenericNotifyStateClear_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xTaskGenericNotifyStateClearImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TASK_NOTIFICATIONS == 1 )
+
+/* System-call wrapper for ulTaskGenericNotifyValueClear. */
+uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+                                            UBaseType_t uxIndexToClear,
+                                            uint32_t ulBitsToClear ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
+                                                  UBaseType_t uxIndexToClear,
+                                                  uint32_t ulBitsToClear ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_ulTaskGenericNotifyValueClearImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_ulTaskGenericNotifyValueClear_Unpriv
+MPU_ulTaskGenericNotifyValueClear_Priv
+    pop {r0}
+    b MPU_ulTaskGenericNotifyValueClearImpl
+MPU_ulTaskGenericNotifyValueClear_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_ulTaskGenericNotifyValueClearImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xQueueGenericSend. */
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+                                  const void * const pvItemToQueue,
+                                  TickType_t xTicksToWait,
+                                  const BaseType_t xCopyPosition ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue,
+                                        const void * const pvItemToQueue,
+                                        TickType_t xTicksToWait,
+                                        const BaseType_t xCopyPosition ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xQueueGenericSendImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xQueueGenericSend_Unpriv
+MPU_xQueueGenericSend_Priv
+    pop {r0}
+    b MPU_xQueueGenericSendImpl
+MPU_xQueueGenericSend_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xQueueGenericSendImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for uxQueueMessagesWaiting. */
+UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_uxQueueMessagesWaitingImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_uxQueueMessagesWaiting_Unpriv
+MPU_uxQueueMessagesWaiting_Priv
+    pop {r0}
+    b MPU_uxQueueMessagesWaitingImpl
+MPU_uxQueueMessagesWaiting_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_uxQueueMessagesWaitingImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for uxQueueSpacesAvailable. */
+UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_uxQueueSpacesAvailableImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_uxQueueSpacesAvailable_Unpriv
+MPU_uxQueueSpacesAvailable_Priv
+    pop {r0}
+    b MPU_uxQueueSpacesAvailableImpl
+MPU_uxQueueSpacesAvailable_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_uxQueueSpacesAvailableImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xQueueReceive. */
+BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+                              void * const pvBuffer,
+                              TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue,
+                                    void * const pvBuffer,
+                                    TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xQueueReceiveImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xQueueReceive_Unpriv
+MPU_xQueueReceive_Priv
+    pop {r0}
+    b MPU_xQueueReceiveImpl
+MPU_xQueueReceive_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xQueueReceiveImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xQueuePeek. */
+BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+                           void * const pvBuffer,
+                           TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue,
+                                 void * const pvBuffer,
+                                 TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xQueuePeekImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xQueuePeek_Unpriv
+MPU_xQueuePeek_Priv
+    pop {r0}
+    b MPU_xQueuePeekImpl
+MPU_xQueuePeek_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xQueuePeekImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xQueueSemaphoreTake. */
+BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+                                    TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue,
+                                          TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xQueueSemaphoreTakeImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xQueueSemaphoreTake_Unpriv
+MPU_xQueueSemaphoreTake_Priv
+    pop {r0}
+    b MPU_xQueueSemaphoreTakeImpl
+MPU_xQueueSemaphoreTake_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xQueueSemaphoreTakeImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
+
+/* System-call wrapper for xQueueGetMutexHolder. */
+TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xQueueGetMutexHolderImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xQueueGetMutexHolder_Unpriv
+MPU_xQueueGetMutexHolder_Priv
+    pop {r0}
+    b MPU_xQueueGetMutexHolderImpl
+MPU_xQueueGetMutexHolder_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xQueueGetMutexHolderImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* System-call wrapper for xQueueTakeMutexRecursive. */
+BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+                                         TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex,
+                                               TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xQueueTakeMutexRecursiveImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xQueueTakeMutexRecursive_Unpriv
+MPU_xQueueTakeMutexRecursive_Priv
+    pop {r0}
+    b MPU_xQueueTakeMutexRecursiveImpl
+MPU_xQueueTakeMutexRecursive_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xQueueTakeMutexRecursiveImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_RECURSIVE_MUTEXES == 1 )
+
+/* System-call wrapper for xQueueGiveMutexRecursive. */
+BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xQueueGiveMutexRecursiveImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xQueueGiveMutexRecursive_Unpriv
+MPU_xQueueGiveMutexRecursive_Priv
+    pop {r0}
+    b MPU_xQueueGiveMutexRecursiveImpl
+MPU_xQueueGiveMutexRecursive_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xQueueGiveMutexRecursiveImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* System-call wrapper for xQueueSelectFromSet. */
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
+                                                      const TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xQueueSelectFromSetImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xQueueSelectFromSet_Unpriv
+MPU_xQueueSelectFromSet_Priv
+    pop {r0}
+    b MPU_xQueueSelectFromSetImpl
+MPU_xQueueSelectFromSet_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xQueueSelectFromSetImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+/* System-call wrapper for xQueueAddToSet. */
+BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                               QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
+                                     QueueSetHandle_t xQueueSet ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xQueueAddToSetImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xQueueAddToSet_Unpriv
+MPU_xQueueAddToSet_Priv
+    pop {r0}
+    b MPU_xQueueAddToSetImpl
+MPU_xQueueAddToSet_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xQueueAddToSetImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* System-call wrapper for vQueueAddToRegistry. */
+void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                              const char * pcName ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm void MPU_vQueueAddToRegistry( QueueHandle_t xQueue,
+                                    const char * pcName ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_vQueueAddToRegistryImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_vQueueAddToRegistry_Unpriv
+MPU_vQueueAddToRegistry_Priv
+    pop {r0}
+    b MPU_vQueueAddToRegistryImpl
+MPU_vQueueAddToRegistry_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_vQueueAddToRegistryImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* System-call wrapper for vQueueUnregisterQueue. */
+void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_vQueueUnregisterQueueImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_vQueueUnregisterQueue_Unpriv
+MPU_vQueueUnregisterQueue_Priv
+    pop {r0}
+    b MPU_vQueueUnregisterQueueImpl
+MPU_vQueueUnregisterQueue_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_vQueueUnregisterQueueImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+/* System-call wrapper for pcQueueGetName. */
+const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_pcQueueGetNameImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_pcQueueGetName_Unpriv
+MPU_pcQueueGetName_Priv
+    pop {r0}
+    b MPU_pcQueueGetNameImpl
+MPU_pcQueueGetName_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_pcQueueGetNameImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* System-call wrapper for pvTimerGetTimerID. */
+void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_pvTimerGetTimerIDImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_pvTimerGetTimerID_Unpriv
+MPU_pvTimerGetTimerID_Priv
+    pop {r0}
+    b MPU_pvTimerGetTimerIDImpl
+MPU_pvTimerGetTimerID_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_pvTimerGetTimerIDImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* System-call wrapper for vTimerSetTimerID. */
+void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                           void * pvNewID ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm void MPU_vTimerSetTimerID( TimerHandle_t xTimer,
+                                 void * pvNewID ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_vTimerSetTimerIDImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_vTimerSetTimerID_Unpriv
+MPU_vTimerSetTimerID_Priv
+    pop {r0}
+    b MPU_vTimerSetTimerIDImpl
+MPU_vTimerSetTimerID_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_vTimerSetTimerIDImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* System-call wrapper for xTimerIsTimerActive. */
+BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xTimerIsTimerActiveImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xTimerIsTimerActive_Unpriv
+MPU_xTimerIsTimerActive_Priv
+    pop {r0}
+    b MPU_xTimerIsTimerActiveImpl
+MPU_xTimerIsTimerActive_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xTimerIsTimerActiveImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* System-call wrapper for xTimerGetTimerDaemonTaskHandle. */
+TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xTimerGetTimerDaemonTaskHandleImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+MPU_xTimerGetTimerDaemonTaskHandle_Priv
+    pop {r0}
+    b MPU_xTimerGetTimerDaemonTaskHandleImpl
+MPU_xTimerGetTimerDaemonTaskHandle_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xTimerGetTimerDaemonTaskHandleImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* System-call wrapper for xTimerGenericCommand. */
+BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                     const BaseType_t xCommandID,
+                                     const TickType_t xOptionalValue,
+                                     BaseType_t * const pxHigherPriorityTaskWoken,
+                                     const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+/* Unlike the other wrappers this one first reads IPSR: a non-zero IPSR
+ * (exception/interrupt context) takes the privileged path — presumably
+ * because an SVC cannot be raised from handler mode; confirm against the
+ * port's SVC handler.  Otherwise it falls back to the usual CONTROL.nPRIV
+ * check.  The unprivileged path uses portSVC_SYSTEM_CALL_ENTER_1 —
+ * presumably because this five-parameter API passes one argument on the
+ * stack. */
+__asm BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
+                                           const BaseType_t xCommandID,
+                                           const TickType_t xOptionalValue,
+                                           BaseType_t * const pxHigherPriorityTaskWoken,
+                                           const TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xTimerGenericCommandImpl
+
+    push {r0}
+    mrs r0, ipsr
+    cmp r0, #0
+    bne MPU_xTimerGenericCommand_Priv
+    mrs r0, control
+    tst r0, #1
+    beq MPU_xTimerGenericCommand_Priv
+MPU_xTimerGenericCommand_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER_1
+    bl MPU_xTimerGenericCommandImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+MPU_xTimerGenericCommand_Priv
+    pop {r0}
+    b MPU_xTimerGenericCommandImpl
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* System-call wrapper for pcTimerGetName. */
+const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_pcTimerGetNameImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_pcTimerGetName_Unpriv
+MPU_pcTimerGetName_Priv
+    pop {r0}
+    b MPU_pcTimerGetNameImpl
+MPU_pcTimerGetName_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_pcTimerGetNameImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* System-call wrapper for vTimerSetReloadMode. */
+void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                              const BaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
+                                    const BaseType_t uxAutoReload ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_vTimerSetReloadModeImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_vTimerSetReloadMode_Unpriv
+MPU_vTimerSetReloadMode_Priv
+    pop {r0}
+    b MPU_vTimerSetReloadModeImpl
+MPU_vTimerSetReloadMode_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_vTimerSetReloadModeImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* System-call wrapper for xTimerGetReloadMode. */
+BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xTimerGetReloadModeImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xTimerGetReloadMode_Unpriv
+MPU_xTimerGetReloadMode_Priv
+    pop {r0}
+    b MPU_xTimerGetReloadModeImpl
+MPU_xTimerGetReloadMode_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xTimerGetReloadModeImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* System-call wrapper for uxTimerGetReloadMode. */
+UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_uxTimerGetReloadModeImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_uxTimerGetReloadMode_Unpriv
+MPU_uxTimerGetReloadMode_Priv
+    pop {r0}
+    b MPU_uxTimerGetReloadModeImpl
+MPU_uxTimerGetReloadMode_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_uxTimerGetReloadModeImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* System-call wrapper for xTimerGetPeriod. */
+TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xTimerGetPeriodImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xTimerGetPeriod_Unpriv
+MPU_xTimerGetPeriod_Priv
+    pop {r0}
+    b MPU_xTimerGetPeriodImpl
+MPU_xTimerGetPeriod_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xTimerGetPeriodImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TIMERS == 1 )
+
+/* System-call wrapper for xTimerGetExpiryTime. */
+TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xTimerGetExpiryTimeImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xTimerGetExpiryTime_Unpriv
+MPU_xTimerGetExpiryTime_Priv
+    pop {r0}
+    b MPU_xTimerGetExpiryTimeImpl
+MPU_xTimerGetExpiryTime_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xTimerGetExpiryTimeImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /* if ( configUSE_TIMERS == 1 ) */
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xEventGroupWaitBits. */
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                     const EventBits_t uxBitsToWaitFor,
+                                     const BaseType_t xClearOnExit,
+                                     const BaseType_t xWaitForAllBits,
+                                     TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV.  Uses portSVC_SYSTEM_CALL_ENTER_1 — presumably
+ * because this five-parameter API passes its fifth argument on the stack;
+ * confirm against the SVC handler. */
+__asm EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
+                                           const EventBits_t uxBitsToWaitFor,
+                                           const BaseType_t xClearOnExit,
+                                           const BaseType_t xWaitForAllBits,
+                                           TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xEventGroupWaitBitsImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xEventGroupWaitBits_Unpriv
+MPU_xEventGroupWaitBits_Priv
+    pop {r0}
+    b MPU_xEventGroupWaitBitsImpl
+MPU_xEventGroupWaitBits_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER_1
+    bl MPU_xEventGroupWaitBitsImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xEventGroupClearBits. */
+EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                      const EventBits_t uxBitsToClear ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
+                                            const EventBits_t uxBitsToClear ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xEventGroupClearBitsImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xEventGroupClearBits_Unpriv
+MPU_xEventGroupClearBits_Priv
+    pop {r0}
+    b MPU_xEventGroupClearBitsImpl
+MPU_xEventGroupClearBits_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xEventGroupClearBitsImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xEventGroupSetBits. */
+EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                    const EventBits_t uxBitsToSet ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
+                                          const EventBits_t uxBitsToSet ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xEventGroupSetBitsImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xEventGroupSetBits_Unpriv
+MPU_xEventGroupSetBits_Priv
+    pop {r0}
+    b MPU_xEventGroupSetBitsImpl
+MPU_xEventGroupSetBits_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xEventGroupSetBitsImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xEventGroupSync. */
+EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                 const EventBits_t uxBitsToSet,
+                                 const EventBits_t uxBitsToWaitFor,
+                                 TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup,
+                                       const EventBits_t uxBitsToSet,
+                                       const EventBits_t uxBitsToWaitFor,
+                                       TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xEventGroupSyncImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xEventGroupSync_Unpriv
+MPU_xEventGroupSync_Priv
+    pop {r0}
+    b MPU_xEventGroupSyncImpl
+MPU_xEventGroupSync_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xEventGroupSyncImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+/* System-call wrapper for uxEventGroupGetNumber (trace facility). */
+UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_uxEventGroupGetNumberImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_uxEventGroupGetNumber_Unpriv
+MPU_uxEventGroupGetNumber_Priv
+    pop {r0}
+    b MPU_uxEventGroupGetNumberImpl
+MPU_uxEventGroupGetNumber_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_uxEventGroupGetNumberImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+/* System-call wrapper for vEventGroupSetNumber (trace facility). */
+void MPU_vEventGroupSetNumber( void * xEventGroup,
+                               UBaseType_t uxEventGroupNumber ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm void MPU_vEventGroupSetNumber( void * xEventGroup,
+                                     UBaseType_t uxEventGroupNumber ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_vEventGroupSetNumberImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_vEventGroupSetNumber_Unpriv
+MPU_vEventGroupSetNumber_Priv
+    pop {r0}
+    b MPU_vEventGroupSetNumberImpl
+MPU_vEventGroupSetNumber_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_vEventGroupSetNumberImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+
+#endif /*( configUSE_TRACE_FACILITY == 1 )*/
+/*-----------------------------------------------------------*/
+
+/* System-call wrapper for xStreamBufferSend. */
+size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                              const void * pvTxData,
+                              size_t xDataLengthBytes,
+                              TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+/* Dispatch on CONTROL.nPRIV: privileged -> direct branch to Impl,
+ * unprivileged -> SVC entry/exit around the Impl call. */
+__asm size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                                    const void * pvTxData,
+                                    size_t xDataLengthBytes,
+                                    TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+    PRESERVE8
+    extern MPU_xStreamBufferSendImpl
+
+    push {r0}
+    mrs r0, control
+    tst r0, #1
+    bne MPU_xStreamBufferSend_Unpriv
+MPU_xStreamBufferSend_Priv
+    pop {r0}
+    b MPU_xStreamBufferSendImpl
+MPU_xStreamBufferSend_Unpriv
+    pop {r0}
+    svc #portSVC_SYSTEM_CALL_ENTER
+    bl MPU_xStreamBufferSendImpl
+    svc #portSVC_SYSTEM_CALL_EXIT
+    bx lr
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
+
+__asm size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+ void * pvRxData,
+ size_t xBufferLengthBytes,
+ TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferReceiveImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferReceive_Unpriv
+MPU_xStreamBufferReceive_Priv
+ pop {r0}
+ b MPU_xStreamBufferReceiveImpl
+MPU_xStreamBufferReceive_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferReceiveImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferIsFullImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsFull_Unpriv
+MPU_xStreamBufferIsFull_Priv
+ pop {r0}
+ b MPU_xStreamBufferIsFullImpl
+MPU_xStreamBufferIsFull_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsFullImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferIsEmptyImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferIsEmpty_Unpriv
+MPU_xStreamBufferIsEmpty_Priv
+ pop {r0}
+ b MPU_xStreamBufferIsEmptyImpl
+MPU_xStreamBufferIsEmpty_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferIsEmptyImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+
+__asm size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferSpacesAvailableImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSpacesAvailable_Unpriv
+MPU_xStreamBufferSpacesAvailable_Priv
+ pop {r0}
+ b MPU_xStreamBufferSpacesAvailableImpl
+MPU_xStreamBufferSpacesAvailable_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSpacesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+
+__asm size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferBytesAvailableImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferBytesAvailable_Unpriv
+MPU_xStreamBufferBytesAvailable_Priv
+ pop {r0}
+ b MPU_xStreamBufferBytesAvailableImpl
+MPU_xStreamBufferBytesAvailable_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferBytesAvailableImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL;
+
+__asm BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
+ size_t xTriggerLevel ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferSetTriggerLevelImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferSetTriggerLevel_Unpriv
+MPU_xStreamBufferSetTriggerLevel_Priv
+ pop {r0}
+ b MPU_xStreamBufferSetTriggerLevelImpl
+MPU_xStreamBufferSetTriggerLevel_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferSetTriggerLevelImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+/*-----------------------------------------------------------*/
+
+size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
+
+__asm size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */
+{
+ PRESERVE8
+ extern MPU_xStreamBufferNextMessageLengthBytesImpl
+
+ push {r0}
+ mrs r0, control
+ tst r0, #1
+ bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+MPU_xStreamBufferNextMessageLengthBytes_Priv
+ pop {r0}
+ b MPU_xStreamBufferNextMessageLengthBytesImpl
+MPU_xStreamBufferNextMessageLengthBytes_Unpriv
+ pop {r0}
+ svc #portSVC_SYSTEM_CALL_ENTER
+ bl MPU_xStreamBufferNextMessageLengthBytesImpl
+ svc #portSVC_SYSTEM_CALL_EXIT
+ bx lr
+}
+/*-----------------------------------------------------------*/
+
+#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c
index ac35afb..beb52ea 100755
--- a/portable/RVDS/ARM_CM4_MPU/port.c
+++ b/portable/RVDS/ARM_CM4_MPU/port.c
@@ -108,13 +108,34 @@
#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
#define portPRIGROUP_SHIFT ( 8UL )
+/* Constants used during system call enter and exit. */
+#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
+#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
+
/* Offsets in the stack to the parameters when inside the SVC handler. */
+#define portOFFSET_TO_LR ( 5 )
#define portOFFSET_TO_PC ( 6 )
+#define portOFFSET_TO_PSR ( 7 )
/* For strict compliance with the Cortex-M spec the task start address should
* have bit-0 clear, as it is loaded into the PC on exit from an ISR. */
#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL )
+/* Does addr lie within [start, end] address range? */
+#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+ ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+/* Is the access request satisfied by the available permissions? */
+#define portIS_AUTHORIZED( accessRequest, permissions ) \
+ ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
+
+/* Max value that fits in a uint32_t type. */
+#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
+
+/* Check if adding a and b will result in overflow. */
+#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
+/*-----------------------------------------------------------*/
+
/* Each task maintains its own interrupt status in the critical nesting
* variable. Note this is not saved as part of the task context as context
* switches can only occur when uxCriticalNesting is zero. */
@@ -158,7 +179,7 @@
* C portion of the SVC handler. The SVC handler is split between an asm entry
* and a C wrapper for simplicity of coding and maintenance.
*/
-void prvSVCHandler( uint32_t * pulRegisters ) __attribute__( ( used ) ) PRIVILEGED_FUNCTION;
+void vSVCHandler_C( uint32_t * pulRegisters ) __attribute__( ( used ) ) PRIVILEGED_FUNCTION;
/*
* Function to enable the VFP.
@@ -215,6 +236,61 @@
#else
void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
#endif
+
+/**
+ * @brief Triggers lazy stacking of FPU registers.
+ */
+static void prvTriggerLazyStacking( void ) PRIVILEGED_FUNCTION;
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with up to 4 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the system call stack so that upon returning from
+ * SVC, the system call stack is used.
+ *
+ * It is used for the system calls with 5 parameters.
+ *
+ * @param pulTaskStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ /**
+ * @brief Sets up the task stack so that upon returning from
+ * SVC, the task stack is used again.
+ *
+ * @param pulSystemCallStack The current SP when the SVC was raised.
+ * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
+ */
+ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/
/*
@@ -223,43 +299,59 @@
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
- BaseType_t xRunPrivileged )
+ BaseType_t xRunPrivileged,
+ xMPU_SETTINGS * xMPUSettings )
{
- /* Simulate the stack frame as it would be created by a context switch
- * interrupt. */
- pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
- *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
- pxTopOfStack--;
- *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */
- pxTopOfStack--;
- *pxTopOfStack = 0; /* LR */
- pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
- *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
-
- /* A save method is being used that requires each task to maintain its
- * own exec return value. */
- pxTopOfStack--;
- *pxTopOfStack = portINITIAL_EXC_RETURN;
-
- pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */
-
if( xRunPrivileged == pdTRUE )
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED;
+ xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED;
}
else
{
- *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED;
+ xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
+ xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED;
}
+ xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */
+ xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */
+ xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */
+ xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */
+ xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */
+ xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */
+ xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */
+ xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */
+ xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
- return pxTopOfStack;
+ xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
+ xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */
+ xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */
+ xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */
+ xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */
+ xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */
+ xMPUSettings->ulContext[ 16 ] = 0; /* LR. */
+ xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */
+ xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ {
+ /* Ensure that the system call stack is double word aligned. */
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
+ xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
+ ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
+
+ /* This is not NULL only for the duration of a system call. */
+ xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+ #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+ return &( xMPUSettings->ulContext[ 19 ] );
}
/*-----------------------------------------------------------*/
-void prvSVCHandler( uint32_t * pulParam )
+void vSVCHandler_C( uint32_t * pulParam )
{
uint8_t ucSVCNumber;
- uint32_t ulReg, ulPC;
+ uint32_t ulPC, ulReg;
#if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
extern uint32_t __syscalls_flash_start__;
@@ -300,11 +392,11 @@
{
__asm
{
-/* *INDENT-OFF* */
+ /* *INDENT-OFF* */
mrs ulReg, control /* Obtain current control value. */
bic ulReg, # 1 /* Set privilege bit. */
msr control, ulReg /* Write back new control value. */
-/* *INDENT-ON* */
+ /* *INDENT-ON* */
}
}
@@ -313,14 +405,14 @@
case portSVC_RAISE_PRIVILEGE:
__asm
{
-/* *INDENT-OFF* */
+ /* *INDENT-OFF* */
mrs ulReg, control /* Obtain current control value. */
bic ulReg, # 1 /* Set privilege bit. */
msr control, ulReg /* Write back new control value. */
-/* *INDENT-ON* */
+ /* *INDENT-ON* */
}
break;
- #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
+ #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
default: /* Unknown SVC call. */
break;
@@ -328,9 +420,339 @@
}
/*-----------------------------------------------------------*/
+__asm void prvTriggerLazyStacking( void ) /* PRIVILEGED_FUNCTION */
+{
+/* *INDENT-OFF* */
+ PRESERVE8
+
+ vpush {s0} /* Trigger lazy stacking. */
+ vpop {s0} /* Nullify the effect of the above instruction. */
+
+/* *INDENT-ON* */
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i, r1;
+ extern uint32_t __syscalls_flash_start__;
+ extern uint32_t __syscalls_flash_end__;
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ prvTriggerLazyStacking();
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the system call stack for the stack frame. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm
+ {
+ msr psp, pulSystemCallStack
+ };
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm
+ {
+ mrs r1, control /* Obtain current control value. */
+ bic r1, #1 /* Clear nPRIV bit. */
+ msr control, r1 /* Write back new control value. */
+ };
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Store the value of the Link Register before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulSystemCallStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i, r1;
+ extern uint32_t __syscalls_flash_start__;
+ extern uint32_t __syscalls_flash_end__;
+
+ ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
+
+ /* This is not NULL only for the duration of the system call. */
+ configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ prvTriggerLazyStacking();
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the system call stack for the stack frame and
+ * the parameter passed on the stack. We only need to copy one
+ * parameter but we still reserve 2 spaces to keep the stack
+ * double word aligned. */
+ pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulSystemCallStack[ i ] = pulTaskStack[ i ];
+ }
+
+ /* Copy the parameter which is passed on the stack. */
+ if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
+ }
+ else
+ {
+ pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
+ /* Record if the hardware used padding to force the stack pointer
+ * to be double word aligned. */
+ pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
+ }
+
+ /* Use the pulSystemCallStack in thread mode. */
+ __asm
+ {
+ msr psp, pulSystemCallStack
+ };
+
+ /* Raise the privilege for the duration of the system call. */
+ __asm
+ {
+ mrs r1, control /* Obtain current control value. */
+ bic r1, #1 /* Clear nPRIV bit. */
+ msr control, r1 /* Write back new control value. */
+ };
+
+ /* Remember the location where we should copy the stack frame when we exit from
+ * the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
+
+ /* Store the value of the Link Register before the SVC was raised. We need to
+ * restore it when we exit from the system call. */
+ pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
+
+ /* We ensure in pxPortInitialiseStack that the system call stack is
+ * double word aligned and therefore, there is no need of padding.
+ * Clear the bit[9] of stacked xPSR. */
+ pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
+{
+ extern TaskHandle_t pxCurrentTCB;
+ xMPU_SETTINGS * pxMpuSettings;
+ uint32_t * pulTaskStack;
+ uint32_t ulStackFrameSize, ulSystemCallLocation, i, r1;
+ extern uint32_t __syscalls_flash_start__;
+ extern uint32_t __syscalls_flash_end__;
+
+ ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
+
+ /* If the request did not come from the system call section, do nothing. */
+ if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
+ ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
+ {
+ pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
+ pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
+
+ if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
+ {
+ /* Extended frame i.e. FPU in use. */
+ ulStackFrameSize = 26;
+ prvTriggerLazyStacking();
+ }
+ else
+ {
+ /* Standard frame i.e. FPU not in use. */
+ ulStackFrameSize = 8;
+ }
+
+ /* Make space on the task stack for the stack frame. */
+ pulTaskStack = pulTaskStack - ulStackFrameSize;
+
+ /* Copy the stack frame. */
+ for( i = 0; i < ulStackFrameSize; i++ )
+ {
+ pulTaskStack[ i ] = pulSystemCallStack[ i ];
+ }
+
+ /* Use the pulTaskStack in thread mode. */
+ __asm
+ {
+ msr psp, pulTaskStack
+ };
+
+ /* Drop the privilege before returning to the thread mode. */
+ __asm
+ {
+ mrs r1, control /* Obtain current control value. */
+ orr r1, #1 /* Set nPRIV bit. */
+ msr control, r1 /* Write back new control value. */
+ };
+
+ /* Restore the stacked link register to what it was at the time of
+ * system call entry. */
+ pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
+
+ /* If the hardware used padding to force the stack pointer
+ * to be double word aligned, set the stacked xPSR bit[9],
+ * otherwise clear it. */
+ if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
+ }
+ else
+ {
+ pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
+ }
+
+ /* This is not NULL only for the duration of the system call. */
+ pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
+ }
+}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
+{
+ BaseType_t xTaskIsPrivileged = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xTaskIsPrivileged = pdTRUE;
+ }
+
+ return xTaskIsPrivileged;
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
__asm void vPortSVCHandler( void )
{
- extern prvSVCHandler
+ extern vSVCHandler_C
+ extern vSystemCallEnter
+ extern vSystemCallEnter_1
+ extern vSystemCallExit
+
+/* *INDENT-OFF* */
+ PRESERVE8
+
+ tst lr, #4
+ ite eq
+ mrseq r0, msp
+ mrsne r0, psp
+
+ ldr r1, [r0, #24]
+ ldrb r2, [r1, #-2]
+ cmp r2, #portSVC_SYSTEM_CALL_ENTER
+ beq syscall_enter
+ cmp r2, #portSVC_SYSTEM_CALL_ENTER_1
+ beq syscall_enter_1
+ cmp r2, #portSVC_SYSTEM_CALL_EXIT
+ beq syscall_exit
+ b vSVCHandler_C
+
+syscall_enter
+ mov r1, lr
+ b vSystemCallEnter
+
+syscall_enter_1
+ mov r1, lr
+ b vSystemCallEnter_1
+
+syscall_exit
+ mov r1, lr
+ b vSystemCallExit
+/* *INDENT-ON* */
+}
+
+#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+__asm void vPortSVCHandler( void )
+{
+ extern vSVCHandler_C
/* *INDENT-OFF* */
PRESERVE8
@@ -345,9 +767,11 @@
mrs r0, psp
#endif
- b prvSVCHandler
+ b vSVCHandler_C
/* *INDENT-ON* */
}
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
__asm void prvRestoreContextOfFirstTask( void )
@@ -355,45 +779,54 @@
/* *INDENT-OFF* */
PRESERVE8
- ldr r0, =0xE000ED08 /* Use the NVIC offset register to locate the stack. */
- ldr r0, [ r0 ]
- ldr r0, [ r0 ]
- msr msp, r0 /* Set the msp back to the start of the stack. */
- ldr r3, =pxCurrentTCB /* Restore the context. */
- ldr r1, [ r3 ]
- ldr r0, [ r1 ] /* The first item in the TCB is the task top of stack. */
- add r1, r1, #4 /* Move onto the second item in the TCB... */
+ ldr r0, =0xE000ED08 /* Use the NVIC offset register to locate the stack. */
+ ldr r0, [r0]
+ ldr r0, [r0]
+ msr msp, r0 /* Set the msp back to the start of the stack. */
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */
- bic r3, r3, # 1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- str r3, [ r2 ] /* Disable MPU. */
+ /*------------ Program MPU. ------------ */
+ ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */
- ldr r2, =0xe000ed9c /* Region Base Address register. */
- ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
- stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ bic r3, r3, # 1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ str r3, [r0] /* Disable MPU. */
+
+ ldr r0, =0xe000ed9c /* Region Base Address register. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
#if ( configTOTAL_MPU_REGIONS == 16 )
- ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
- stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
- ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
- stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
#endif /* configTOTAL_MPU_REGIONS == 16. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */
- orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- str r3, [ r2 ] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ str r3, [r0] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
- ldmia r0 !, { r3 - r11, r14 } /* Pop the registers that are not automatically saved on exception entry. */
+ /*---------- Restore Context. ---------- */
+ ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ ldr r1, [r2] /* r1 = Location of saved context in TCB. */
+
+ ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ msr psp, r0
+ stmia r0, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */
msr control, r3
- msr psp, r0 /* Restore the task stack pointer. */
+ str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */
+
mov r0, #0
msr basepri, r0
- bx r14
- nop
+ bx lr
/* *INDENT-ON* */
}
/*-----------------------------------------------------------*/
@@ -650,72 +1083,90 @@
/* *INDENT-OFF* */
PRESERVE8
+ ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ ldr r1, [r2] /* r1 = Location where the context should be saved. */
+
+ /*------------ Save Context. ----------- */
+ mrs r3, control
mrs r0, psp
+ isb
- ldr r3, =pxCurrentTCB /* Get the location of the current TCB. */
- ldr r2, [ r3 ]
+ add r0, r0, #0x20 /* Move r0 to location where s0 is saved. */
+ tst lr, #0x10
+ ittt eq
+ vstmiaeq r1!, {s16-s31} /* Store s16-s31. */
+ vldmiaeq r0, {s0-s16} /* Copy hardware saved FP context into s0-s16. */
+ vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */
+ sub r0, r0, #0x20 /* Set r0 back to the location of hardware saved context. */
- tst r14, #0x10 /* Is the task using the FPU context? If so, push high vfp registers. */
- it eq
- vstmdbeq r0 !, { s16 - s31 }
+ stmia r1!, {r3-r11, lr} /* Store CONTROL register, r4-r11 and LR. */
+ ldmia r0, {r4-r11} /* Copy hardware saved context into r4-r11. */
+ stmia r1!, {r0, r4-r11} /* Store original PSP (after hardware has saved context) and the hardware saved context. */
+ str r1, [r2] /* Save the location from where the context should be restored as the first member of TCB. */
- mrs r1, control
- stmdb r0 !, { r1, r4 - r11, r14 } /* Save the remaining registers. */
- str r0, [ r2 ] /* Save the new top of stack into the first member of the TCB. */
-
- stmdb sp !, { r0, r3 }
- mov r0, # configMAX_SYSCALL_INTERRUPT_PRIORITY
- #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- #endif
+ /*---------- Select next task. --------- */
+ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
+#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
+ cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+#endif
msr basepri, r0
dsb
isb
- #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
- cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
- #endif
+#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
+ cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */
+#endif
bl vTaskSwitchContext
mov r0, #0
msr basepri, r0
- ldmia sp !, { r0, r3 }
- /* Restore the context. */
- ldr r1, [ r3 ]
- ldr r0, [ r1 ] /* The first item in the TCB is the task top of stack. */
- add r1, r1, #4 /* Move onto the second item in the TCB... */
- dmb /* Complete outstanding transfers before disabling MPU. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */
- bic r3, r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
- str r3, [ r2 ] /* Disable MPU. */
+ /*------------ Program MPU. ------------ */
+ ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */
- ldr r2, =0xe000ed9c /* Region Base Address register. */
- ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
- stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ dmb /* Complete outstanding transfers before disabling MPU. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ bic r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */
+ str r3, [r0] /* Disable MPU. */
- #if ( configTOTAL_MPU_REGIONS == 16 )
- ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
- stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
- ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
- stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
- #endif /* configTOTAL_MPU_REGIONS == 16. */
+ ldr r0, =0xe000ed9c /* Region Base Address register. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
- ldr r2, =0xe000ed94 /* MPU_CTRL register. */
- ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */
- orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
- str r3, [ r2 ] /* Enable MPU. */
- dsb /* Force memory writes before continuing. */
+#if ( configTOTAL_MPU_REGIONS == 16 )
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
+ ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
+ stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+#endif /* configTOTAL_MPU_REGIONS == 16. */
- ldmia r0 !, { r3 - r11, r14 } /* Pop the registers that are not automatically saved on exception entry. */
+ ldr r0, =0xe000ed94 /* MPU_CTRL register. */
+ ldr r3, [r0] /* Read the value of MPU_CTRL. */
+ orr r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */
+ str r3, [r0] /* Enable MPU. */
+ dsb /* Force memory writes before continuing. */
+
+ /*---------- Restore Context. ---------- */
+ ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */
+ ldr r2, [r3] /* r2 = pxCurrentTCB. */
+ ldr r1, [r2] /* r1 = Location of saved context in TCB. */
+
+ ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */
+ msr psp, r0
+ stmia r0!, {r4-r11} /* Copy the hardware saved context on the task stack. */
+ ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */
msr control, r3
- tst r14, #0x10 /* Is the task using the FPU context? If so, pop the high vfp registers too. */
- it eq
- vldmiaeq r0 !, { s16 - s31 }
+ tst lr, #0x10
+ ittt eq
+ vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */
+ vstmiaeq r0!, {s0-s16} /* Copy hardware saved FP context on the task stack. */
+ vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */
- msr psp, r0
- bx r14
- nop
+ str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */
+ bx lr
/* *INDENT-ON* */
}
/*-----------------------------------------------------------*/
@@ -934,11 +1385,19 @@
( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) |
( portMPU_REGION_ENABLE );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
+
/* Invalidate user configurable regions. */
for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ )
{
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
}
else
@@ -961,6 +1420,12 @@
( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) |
( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) +
+ ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL );
+ xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
+ tskMPU_WRITE_PERMISSION );
}
lIndex = 0;
@@ -981,12 +1446,28 @@
( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) |
( xRegions[ lIndex ].ulParameters ) |
( portMPU_REGION_ENABLE );
+
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL );
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
+ if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) ||
+ ( ( xRegions[lIndex].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION;
+ }
+ if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE )
+ {
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
+ }
}
else
{
/* Invalidate the region. */
xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID );
xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL;
+ xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
}
lIndex++;
@@ -995,6 +1476,47 @@
}
/*-----------------------------------------------------------*/
+BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
+ uint32_t ulBufferLength,
+ uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
+
+{
+ uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
+ BaseType_t xAccessGranted = pdFALSE;
+ const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
+
+ if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
+ {
+ xAccessGranted = pdTRUE;
+ }
+ else
+ {
+ if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
+ {
+ ulBufferStartAddress = ( uint32_t ) pvBuffer;
+ ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
+
+ for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ )
+ {
+ if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress,
+ xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) &&
+ portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) )
+ {
+ xAccessGranted = pdTRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ return xAccessGranted;
+}
+/*-----------------------------------------------------------*/
+
__asm uint32_t prvPortGetIPSR( void )
{
/* *INDENT-OFF* */
diff --git a/portable/RVDS/ARM_CM4_MPU/portmacro.h b/portable/RVDS/ARM_CM4_MPU/portmacro.h
index c7cd562..cc4e136 100644
--- a/portable/RVDS/ARM_CM4_MPU/portmacro.h
+++ b/portable/RVDS/ARM_CM4_MPU/portmacro.h
@@ -193,9 +193,45 @@
uint32_t ulRegionAttribute;
} xMPU_REGION_REGISTERS;
+typedef struct MPU_REGION_SETTINGS
+{
+ uint32_t ulRegionStartAddress;
+ uint32_t ulRegionEndAddress;
+ uint32_t ulRegionPermissions;
+} xMPU_REGION_SETTINGS;
+
+#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+
+ #ifndef configSYSTEM_CALL_STACK_SIZE
+ #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2.
+ #endif
+
+ typedef struct SYSTEM_CALL_STACK_INFO
+ {
+ uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ];
+ uint32_t * pulSystemCallStack;
+ uint32_t * pulTaskStack;
+ uint32_t ulLinkRegisterAtSystemCallEntry;
+ } xSYSTEM_CALL_STACK_INFO;
+
+#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
+
+#define MAX_CONTEXT_SIZE 52
+
+/* Flags used for xMPU_SETTINGS.ulTaskFlags member. */
+#define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL )
+#define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
typedef struct MPU_SETTINGS
{
xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ];
+ uint32_t ulContext[ MAX_CONTEXT_SIZE ];
+ uint32_t ulTaskFlags;
+
+ #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
+ xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo;
+ #endif
} xMPU_SETTINGS;
/* Architecture specifics. */
@@ -209,9 +245,12 @@
/*-----------------------------------------------------------*/
/* SVC numbers for various services. */
-#define portSVC_START_SCHEDULER 0
-#define portSVC_YIELD 1
-#define portSVC_RAISE_PRIVILEGE 2
+#define portSVC_START_SCHEDULER 0
+#define portSVC_YIELD 1
+#define portSVC_RAISE_PRIVILEGE 2
+#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with up to 4 parameters. */
+#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */
+#define portSVC_SYSTEM_CALL_EXIT 5
/* Scheduler utilities. */
@@ -314,6 +353,16 @@
#define portRESET_PRIVILEGE() vResetPrivilege()
/*-----------------------------------------------------------*/
+extern BaseType_t xPortIsTaskPrivileged( void );
+
+/**
+ * @brief Checks whether or not the calling task is privileged.
+ *
+ * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+ */
+#define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+/*-----------------------------------------------------------*/
+
static portFORCE_INLINE void vPortSetBASEPRI( uint32_t ulBASEPRI )
{
__asm
diff --git a/queue.c b/queue.c
index 23e9704..29765a5 100644
--- a/queue.c
+++ b/queue.c
@@ -2194,6 +2194,12 @@
#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
+UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
+{
+ return ( ( Queue_t * ) xQueue )->uxItemSize;
+}
+/*-----------------------------------------------------------*/
+
#if ( configUSE_MUTEXES == 1 )
static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
diff --git a/tasks.c b/tasks.c
index ed3cd1e..72200ec 100644
--- a/tasks.c
+++ b/tasks.c
@@ -975,17 +975,17 @@
{
#if ( portSTACK_GROWTH < 0 )
{
- pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );
+ pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
}
#else /* portSTACK_GROWTH */
{
- pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );
+ pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
}
#endif /* portSTACK_GROWTH */
}
#else /* portHAS_STACK_OVERFLOW_CHECKING */
{
- pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
+ pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
}
#endif /* portHAS_STACK_OVERFLOW_CHECKING */
}
@@ -5493,6 +5493,21 @@
}
#endif /* INCLUDE_vTaskSuspend */
}
+/*-----------------------------------------------------------*/
+
+#if ( portUSING_MPU_WRAPPERS == 1 )
+
+ xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask )
+ {
+ TCB_t * pxTCB;
+
+ pxTCB = prvGetTCBFromHandle( xTask );
+
+ return &( pxTCB->xMPUSettings );
+ }
+
+#endif /* portUSING_MPU_WRAPPERS */
+/*-----------------------------------------------------------*/
/* Code below here allows additional code to be inserted into this source file,
* especially where access to file scope functions and data is needed (for example