arch: arm: aarch32: add ARMv8-R MPU support

ARMv8-R AArch32 processors implement ARM PMSAv8-32. To add support
for ARMv8-R, reuse the existing ARMv8-M MPU code and abstract the
accesses to the MPU registers (RBAR, RLAR, MAIR0, PRSELR), which are
CP15 system registers on ARMv8-R instead of memory-mapped ones. The
arm_mpu_v8m.h header is renamed to arm_mpu_v8.h since it is now
shared between ARMv8-M and ARMv8-R.
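
As a usage sketch (not part of this patch), a SoC could describe its
fixed regions with the new ARMv8-R attribute macros, which take a
region limit rather than a (base, size) pair. The addresses, sizes and
region names below are made up, and the three-argument
MPU_REGION_ENTRY(_name, _base, _attr) form of the ARMv8-M path is
assumed:

  #include <zephyr/arch/arm/aarch32/mpu/arm_mpu.h>
  #include <zephyr/sys/util.h>

  /* Hypothetical layout: 2 MiB of SRAM and a 256 MiB peripheral window. */
  static const struct arm_mpu_region mpu_regions[] = {
          MPU_REGION_ENTRY("SRAM_0", 0x04000000,
                           REGION_RAM_ATTR(0x04000000 + 0x00200000)),
          MPU_REGION_ENTRY("PERIPH_0", 0x40000000,
                           REGION_DEVICE_ATTR(0x40000000 + 0x10000000)),
  };

  const struct arm_mpu_config mpu_config = {
          .num_regions = ARRAY_SIZE(mpu_regions),
          .mpu_regions = mpu_regions,
  };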

Signed-off-by: Julien Massot <julien.massot@iot.bzh>
Signed-off-by: Manuel Arguelles <manuel.arguelles@nxp.com>
diff --git a/arch/arm/core/aarch32/mpu/Kconfig b/arch/arm/core/aarch32/mpu/Kconfig
index b0c4215..6c6de92 100644
--- a/arch/arm/core/aarch32/mpu/Kconfig
+++ b/arch/arm/core/aarch32/mpu/Kconfig
@@ -12,7 +12,8 @@
 	select THREAD_STACK_INFO
 	select ARCH_HAS_EXECUTABLE_PAGE_BIT
 	select MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT if !(CPU_HAS_NXP_MPU || ARMV8_M_BASELINE || ARMV8_M_MAINLINE)
-	select MPU_REQUIRES_NON_OVERLAPPING_REGIONS if CPU_HAS_ARM_MPU && (ARMV8_M_BASELINE || ARMV8_M_MAINLINE)
+	select MPU_REQUIRES_NON_OVERLAPPING_REGIONS if CPU_HAS_ARM_MPU && (ARMV8_M_BASELINE || ARMV8_M_MAINLINE || AARCH32_ARMV8_R)
+	select MPU_GAP_FILLING if AARCH32_ARMV8_R
 	help
 	  MCU implements Memory Protection Unit.
 
@@ -46,6 +47,7 @@
 config ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
 	int
 	default 256 if ARM_MPU && ARMV6_M_ARMV8_M_BASELINE && !ARMV8_M_BASELINE
+	default 64 if ARM_MPU && AARCH32_ARMV8_R
 	default 32 if ARM_MPU
 	default 4
 	help
diff --git a/arch/arm/core/aarch32/mpu/arm_core_mpu.c b/arch/arm/core/aarch32/mpu/arm_core_mpu.c
index 71d643c..f6f40b7 100644
--- a/arch/arm/core/aarch32/mpu/arm_core_mpu.c
+++ b/arch/arm/core/aarch32/mpu/arm_core_mpu.c
@@ -155,6 +155,8 @@
 #endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS */
 }
 
+extern void arm_core_mpu_enable(void);
+extern void arm_core_mpu_disable(void);
 /**
  * @brief Use the HW-specific MPU driver to program
  *        the dynamic MPU regions.
@@ -303,8 +305,14 @@
 #endif /* CONFIG_MPU_STACK_GUARD */
 
 	/* Configure the dynamic MPU regions */
+#ifdef CONFIG_AARCH32_ARMV8_R
+	arm_core_mpu_disable();
+#endif
 	arm_core_mpu_configure_dynamic_mpu_regions(dynamic_regions,
 						   region_num);
+#ifdef CONFIG_AARCH32_ARMV8_R
+	arm_core_mpu_enable();
+#endif
 }
 
 #if defined(CONFIG_USERSPACE)
diff --git a/arch/arm/core/aarch32/mpu/arm_mpu.c b/arch/arm/core/aarch32/mpu/arm_mpu.c
index 85107e3..94b4ac1 100644
--- a/arch/arm/core/aarch32/mpu/arm_mpu.c
+++ b/arch/arm/core/aarch32/mpu/arm_mpu.c
@@ -43,11 +43,12 @@
 	defined(CONFIG_CPU_CORTEX_M3) || \
 	defined(CONFIG_CPU_CORTEX_M4) || \
 	defined(CONFIG_CPU_CORTEX_M7) || \
-	defined(CONFIG_CPU_AARCH32_CORTEX_R)
+	defined(CONFIG_ARMV7_R)
 #include "arm_mpu_v7_internal.h"
 #elif defined(CONFIG_CPU_CORTEX_M23) || \
 	defined(CONFIG_CPU_CORTEX_M33) || \
-	defined(CONFIG_CPU_CORTEX_M55)
+	defined(CONFIG_CPU_CORTEX_M55) || \
+	defined(CONFIG_AARCH32_ARMV8_R)
 #include "arm_mpu_v8_internal.h"
 #else
 #error "Unsupported ARM CPU"
@@ -84,7 +85,7 @@
 
 	/* Populate internal ARM MPU region configuration structure. */
 	region_conf.base = new_region->start;
-#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
+#if defined(CONFIG_ARMV7_R)
 	region_conf.size = size_to_mpu_rasr_size(new_region->size);
 #endif
 	get_region_attr_from_mpu_partition_info(&region_conf.attr,
diff --git a/arch/arm/core/aarch32/mpu/arm_mpu_v8_internal.h b/arch/arm/core/aarch32/mpu/arm_mpu_v8_internal.h
index 2b8dd76..cad4e07 100644
--- a/arch/arm/core/aarch32/mpu/arm_mpu_v8_internal.h
+++ b/arch/arm/core/aarch32/mpu/arm_mpu_v8_internal.h
@@ -28,7 +28,109 @@
  * regions may be configured.
  */
 static struct dynamic_region_info dyn_reg_info[MPU_DYNAMIC_REGION_AREAS_NUM];
+#if defined(CONFIG_CPU_CORTEX_M23) || defined(CONFIG_CPU_CORTEX_M33) || \
+	defined(CONFIG_CPU_CORTEX_M55)
+static inline void mpu_set_mair0(uint32_t mair0)
+{
+	MPU->MAIR0 = mair0;
+}
 
+static inline void mpu_set_rnr(uint32_t rnr)
+{
+	MPU->RNR = rnr;
+}
+
+static inline void mpu_set_rbar(uint32_t rbar)
+{
+	MPU->RBAR = rbar;
+}
+
+static inline uint32_t mpu_get_rbar(void)
+{
+	return MPU->RBAR;
+}
+
+static inline void mpu_set_rlar(uint32_t rlar)
+{
+	MPU->RLAR = rlar;
+}
+
+static inline uint32_t mpu_get_rlar(void)
+{
+	return MPU->RLAR;
+}
+
+static inline uint8_t mpu_get_num_regions(void)
+{
+	uint32_t type = MPU->TYPE;
+
+	type = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos;
+
+	return (uint8_t)type;
+}
+
+static inline void mpu_clear_region(uint32_t rnr)
+{
+	ARM_MPU_ClrRegion(rnr);
+}
+
+#elif defined(CONFIG_AARCH32_ARMV8_R)
+static inline void mpu_set_mair0(uint32_t mair0)
+{
+	write_mair0(mair0);
+	__DSB();
+	__ISB();
+}
+
+static inline void mpu_set_rnr(uint32_t rnr)
+{
+	write_prselr(rnr);
+	__DSB();
+}
+
+static inline void mpu_set_rbar(uint32_t rbar)
+{
+	write_prbar(rbar);
+	__DSB();
+	__ISB();
+}
+
+static inline uint32_t mpu_get_rbar(void)
+{
+	return read_prbar();
+}
+
+static inline void mpu_set_rlar(uint32_t rlar)
+{
+	write_prlar(rlar);
+	__DSB();
+	__ISB();
+}
+
+static inline uint32_t mpu_get_rlar(void)
+{
+	return read_prlar();
+}
+
+static inline uint8_t mpu_get_num_regions(void)
+{
+	uint32_t type = read_mpuir();
+
+	type = (type >> MPU_IR_REGION_Pos) & MPU_IR_REGION_Msk;
+
+	return (uint8_t)type;
+}
+
+static inline void mpu_clear_region(uint32_t rnr)
+{
+	mpu_set_rnr(rnr);
+	mpu_set_rbar(0);
+	mpu_set_rlar(0);
+}
+
+#else
+#error "Unsupported ARM CPU"
+#endif
 
 /* Global MPU configuration at system initialization. */
 static void mpu_init(void)
@@ -36,20 +138,14 @@
 	/* Configure the cache-ability attributes for all the
 	 * different types of memory regions.
 	 */
+	mpu_set_mair0(MPU_MAIR_ATTRS);
+}
 
-	/* Flash region(s): Attribute-0
-	 * SRAM region(s): Attribute-1
-	 * SRAM no cache-able regions(s): Attribute-2
-	 */
-	MPU->MAIR0 =
-		((MPU_MAIR_ATTR_FLASH << MPU_MAIR0_Attr0_Pos) &
-			MPU_MAIR0_Attr0_Msk)
-		|
-		((MPU_MAIR_ATTR_SRAM << MPU_MAIR0_Attr1_Pos) &
-			MPU_MAIR0_Attr1_Msk)
-		|
-		((MPU_MAIR_ATTR_SRAM_NOCACHE << MPU_MAIR0_Attr2_Pos) &
-			MPU_MAIR0_Attr2_Msk);
+static void mpu_set_region(uint32_t rnr, uint32_t rbar, uint32_t rlar)
+{
+	mpu_set_rnr(rnr);
+	mpu_set_rbar(rbar);
+	mpu_set_rlar(rlar);
 }
 
 /* This internal function performs MPU region initialization.
@@ -60,7 +156,7 @@
 static void region_init(const uint32_t index,
 	const struct arm_mpu_region *region_conf)
 {
-	ARM_MPU_SetRegion(
+	mpu_set_region(
 		/* RNR */
 		index,
 		/* RBAR */
@@ -116,6 +212,21 @@
  * needs to be enabled.
  *
  */
+#if defined(CONFIG_AARCH32_ARMV8_R)
+static inline int get_region_index(uint32_t start, uint32_t size)
+{
+	uint32_t limit = (start + size - 1) & MPU_RLAR_LIMIT_Msk;
+
+	for (uint8_t idx = 0; idx < mpu_get_num_regions(); idx++) {
+		mpu_set_rnr(idx);
+		if (start >= (mpu_get_rbar() & MPU_RBAR_BASE_Msk) &&
+		    limit <= (mpu_get_rlar() & MPU_RLAR_LIMIT_Msk)) {
+			return idx;
+		}
+	}
+	return -EINVAL;
+}
+#else
 static inline int get_region_index(uint32_t start, uint32_t size)
 {
 	uint32_t region_start_addr = arm_cmse_mpu_region_get(start);
@@ -129,48 +240,49 @@
 	}
 	return -EINVAL;
 }
+#endif
 
 static inline uint32_t mpu_region_get_base(const uint32_t index)
 {
-	MPU->RNR = index;
-	return MPU->RBAR & MPU_RBAR_BASE_Msk;
+	mpu_set_rnr(index);
+	return mpu_get_rbar() & MPU_RBAR_BASE_Msk;
 }
 
 static inline void mpu_region_set_base(const uint32_t index, const uint32_t base)
 {
-	MPU->RNR = index;
-	MPU->RBAR = (MPU->RBAR & (~MPU_RBAR_BASE_Msk))
-		| (base & MPU_RBAR_BASE_Msk);
+	mpu_set_rnr(index);
+	mpu_set_rbar((mpu_get_rbar() & (~MPU_RBAR_BASE_Msk))
+		     | (base & MPU_RBAR_BASE_Msk));
 }
 
 static inline uint32_t mpu_region_get_last_addr(const uint32_t index)
 {
-	MPU->RNR = index;
-	return (MPU->RLAR & MPU_RLAR_LIMIT_Msk) | (~MPU_RLAR_LIMIT_Msk);
+	mpu_set_rnr(index);
+	return (mpu_get_rlar() & MPU_RLAR_LIMIT_Msk) | (~MPU_RLAR_LIMIT_Msk);
 }
 
 static inline void mpu_region_set_limit(const uint32_t index, const uint32_t limit)
 {
-	MPU->RNR = index;
-	MPU->RLAR = (MPU->RLAR & (~MPU_RLAR_LIMIT_Msk))
-		| (limit & MPU_RLAR_LIMIT_Msk);
+	mpu_set_rnr(index);
+	mpu_set_rlar((mpu_get_rlar() & (~MPU_RLAR_LIMIT_Msk))
+		     | (limit & MPU_RLAR_LIMIT_Msk));
 }
 
 static inline void mpu_region_get_access_attr(const uint32_t index,
 	arm_mpu_region_attr_t *attr)
 {
-	MPU->RNR = index;
+	mpu_set_rnr(index);
 
-	attr->rbar = MPU->RBAR &
+	attr->rbar = mpu_get_rbar() &
 		(MPU_RBAR_XN_Msk | MPU_RBAR_AP_Msk | MPU_RBAR_SH_Msk);
-	attr->mair_idx = (MPU->RLAR & MPU_RLAR_AttrIndx_Msk) >>
+	attr->mair_idx = (mpu_get_rlar() & MPU_RLAR_AttrIndx_Msk) >>
 		MPU_RLAR_AttrIndx_Pos;
 }
 
 static inline void mpu_region_get_conf(const uint32_t index,
 	struct arm_mpu_region *region_conf)
 {
-	MPU->RNR = index;
+	mpu_set_rnr(index);
 
 	/* Region attribution:
 	 * - Cache-ability
@@ -180,10 +292,10 @@
 	mpu_region_get_access_attr(index, &region_conf->attr);
 
 	/* Region base address */
-	region_conf->base = (MPU->RBAR & MPU_RBAR_BASE_Msk);
+	region_conf->base = mpu_get_rbar() & MPU_RBAR_BASE_Msk;
 
 	/* Region limit address */
-	region_conf->attr.r_limit = MPU->RLAR & MPU_RLAR_LIMIT_Msk;
+	region_conf->attr.r_limit = mpu_get_rlar() & MPU_RLAR_LIMIT_Msk;
 }
 
 /**
@@ -242,11 +354,76 @@
  */
 static inline int is_enabled_region(uint32_t index)
 {
-	MPU->RNR = index;
+	mpu_set_rnr(index);
 
-	return (MPU->RLAR & MPU_RLAR_EN_Msk) ? 1 : 0;
+	return (mpu_get_rlar() & MPU_RLAR_EN_Msk) ? 1 : 0;
 }
 
+#if defined(CONFIG_AARCH32_ARMV8_R)
+/**
+ * This internal function checks if the given buffer is in the region.
+ *
+ * Note:
+ *   The caller must provide a valid region number.
+ */
+static inline int is_in_region(uint32_t rnr, uint32_t start, uint32_t size)
+{
+	uint32_t rbar;
+	uint32_t rlar;
+
+	rbar = mpu_region_get_base(rnr);
+	rlar = mpu_region_get_last_addr(rnr);
+
+	if ((start >= rbar) && ((start + size) <= rlar)) {
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int is_user_accessible_region(uint32_t rnr, int write)
+{
+	uint32_t r_ap;
+
+	mpu_set_rnr(rnr);
+
+	r_ap = (mpu_get_rbar() & MPU_RBAR_AP_Msk) >> MPU_RBAR_AP_Pos;
+
+	if (write != 0) {
+		return r_ap == P_RW_U_RW;
+	}
+
+	return ((r_ap == P_RW_U_RW) || (r_ap == P_RO_U_RO));
+}
+
+/**
+ * This internal function validates whether a given memory buffer
+ * is user accessible or not.
+ */
+static inline int mpu_buffer_validate(void *addr, size_t size, int write)
+{
+	int32_t rnr;
+	int rc = -EPERM;
+
+	int key = arch_irq_lock();
+
+	/* Iterate over all MPU regions */
+	for (rnr = 0; rnr < mpu_get_num_regions(); rnr++) {
+		if (!is_enabled_region(rnr) ||
+		    !is_in_region(rnr, (uint32_t)addr, size)) {
+			continue;
+		}
+
+		if (is_user_accessible_region(rnr, write)) {
+			rc = 0;
+		}
+	}
+
+	arch_irq_unlock(key);
+	return rc;
+}
+
+#else
 /**
  * This internal function validates whether a given memory buffer
  * is user accessible or not.
@@ -308,7 +485,7 @@
 #endif /* CONFIG_CPU_HAS_TEE */
 	return -EPERM;
 }
-
+#endif /* CONFIG_AARCH32_ARMV8_R */
 
 #endif /* CONFIG_USERSPACE */
 
@@ -539,11 +716,7 @@
 	/* Retrieve the number of regions from DTS configuration. */
 	return NUM_MPU_REGIONS;
 #else
-	uint32_t type = MPU->TYPE;
-
-	type = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos;
-
-	return (uint8_t)type;
+	return mpu_get_num_regions();
 #endif /* NUM_MPU_REGIONS */
 }
 
@@ -562,7 +735,7 @@
 
 	/* Disable all MPU regions except for the static ones. */
 	for (int i = mpu_reg_index; i < get_num_regions(); i++) {
-		ARM_MPU_ClrRegion(i);
+		mpu_clear_region(i);
 	}
 
 #if defined(CONFIG_MPU_GAP_FILLING)
@@ -587,7 +760,7 @@
 	 * may be programmed.
 	 */
 	for (int i = 0; i < MPU_DYNAMIC_REGION_AREAS_NUM; i++) {
-		ARM_MPU_ClrRegion(dyn_reg_info[i].index);
+		mpu_clear_region(dyn_reg_info[i].index);
 	}
 
 	/* The dynamic regions are now programmed on top of
diff --git a/arch/arm/core/aarch32/userspace.S b/arch/arm/core/aarch32/userspace.S
index 5d970e9..5ce2cc3 100644
--- a/arch/arm/core/aarch32/userspace.S
+++ b/arch/arm/core/aarch32/userspace.S
@@ -14,7 +14,7 @@
 
 #include <zephyr/arch/arm/aarch32/exc.h>
 
-#if defined(CONFIG_ARMV7_R)
+#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
 #include <zephyr/arch/cpu.h>
 #endif
 
@@ -63,7 +63,7 @@
     ldr r0, [r0, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
     ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
     add r0, r0, ip
-#elif defined(CONFIG_ARMV7_R)
+#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
     ldr r0, [r0, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
     ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
     add r0, r0, ip
@@ -79,7 +79,7 @@
      */
     mov ip, sp
 
-#if defined(CONFIG_ARMV7_R)
+#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
     mov sp, r0
 #else
     /* set stack to privileged stack
@@ -113,7 +113,7 @@
     mov r1, ip
     push {r0,r1}
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
-    || defined(CONFIG_ARMV7_R)
+    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
     push {r0,ip}
 #endif
 
@@ -145,7 +145,7 @@
 
     push {r0,r3}
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
-    || defined(CONFIG_ARMV7_R)
+    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
     pop {r0,ip}
 
     /* load up stack info from user stack */
@@ -169,7 +169,7 @@
     pop {r0, r1}
     mov ip, r1
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
-    || defined(CONFIG_ARMV7_R)
+    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
     pop {r0,ip}
 #endif
 
@@ -184,11 +184,11 @@
     mov lr, r4
     mov r4, ip
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
-    || defined(CONFIG_ARMV7_R)
+    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
     pop {r1,r2,r3,lr}
 #endif
 
-#if defined(CONFIG_ARMV7_R)
+#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
     /*
      * set stack to user stack.  We are in SYSTEM state, so r13 and r14 are
      * shared with USER state
@@ -244,7 +244,7 @@
     /* restore r0 */
     mov r0, lr
 
-#if defined(CONFIG_ARMV7_R)
+#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
     /* change processor mode to unprivileged, with all interrupts enabled. */
     msr CPSR_c, #MODE_USR
 #else
@@ -296,7 +296,7 @@
     mov ip, r0
     pop {r0, r1}
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
-    || defined(CONFIG_ARMV7_R)
+    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
     ldr ip, =z_thread_entry
 #endif
     bx ip
@@ -362,7 +362,7 @@
     subs ip, #8
     str sp, [ip, #0]
     str lr, [ip, #4]
-#elif defined(CONFIG_ARMV7_R)
+#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
     /*
      * The SVC handler has already switched to the privileged stack.
      * Store the user SP and LR at the beginning of the priv stack.
@@ -373,7 +373,7 @@
     push {ip, lr}
 #endif
 
-#if !defined(CONFIG_ARMV7_R)
+#if !defined(CONFIG_CPU_AARCH32_CORTEX_R)
     /* switch to privileged stack */
     msr PSP, ip
 #endif
@@ -449,7 +449,7 @@
     mov r0, ip
 
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
-    || defined(CONFIG_ARMV7_R)
+    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
     ldr ip, =K_SYSCALL_BAD
     cmp r6, ip
     bne valid_syscall
@@ -551,7 +551,7 @@
     msr CONTROL, r2
     pop {r2, r3}
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
-    || defined(CONFIG_ARMV7_R)
+    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
     ldr r0, =_kernel
     ldr r0, [r0, #_kernel_offset_to_current]
     ldr r1, [r0, #_thread_offset_to_mode]
@@ -615,7 +615,7 @@
      */
     mov ip, r8
     orrs ip, ip, #1
-#elif defined(CONFIG_ARMV7_R)
+#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
     /* Restore user stack pointer */
     ldr ip, [sp,#12]
     mov sp, ip
@@ -646,7 +646,7 @@
 
     /* sp+4 is error value, init to -1 */
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
-    || defined(CONFIG_ARMV7_R)
+    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
     ldr r3, =-1
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     mov.w r3, #-1
@@ -662,7 +662,7 @@
     ldrb r5, [r0, r3]
 
 z_arm_user_string_nlen_fault_end:
-#if defined(CONFIG_ARMV7_R)
+#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
     cmp r5, #0
     beq strlen_done
 
diff --git a/include/zephyr/arch/arm/aarch32/cortex_a_r/lib_helpers.h b/include/zephyr/arch/arm/aarch32/cortex_a_r/lib_helpers.h
index a5602bb..79d1dab 100644
--- a/include/zephyr/arch/arm/aarch32/cortex_a_r/lib_helpers.h
+++ b/include/zephyr/arch/arm/aarch32/cortex_a_r/lib_helpers.h
@@ -68,6 +68,7 @@
 MAKE_REG_HELPER(prselr,	     0, 6, 2, 1);
 MAKE_REG_HELPER(prbar,	     0, 6, 3, 0);
 MAKE_REG_HELPER(prlar,	     0, 6, 3, 1);
+MAKE_REG_HELPER(mair0,       0, 10, 2, 0);
 MAKE_REG_HELPER(vbar,        0, 12, 0, 0);
 MAKE_REG_HELPER(cntv_ctl,    0, 14,  3, 1);
 MAKE_REG64_HELPER(ICC_SGI1R, 0, 12);
diff --git a/include/zephyr/arch/arm/aarch32/mpu/arm_mpu.h b/include/zephyr/arch/arm/aarch32/mpu/arm_mpu.h
index d882611..65d9194 100644
--- a/include/zephyr/arch/arm/aarch32/mpu/arm_mpu.h
+++ b/include/zephyr/arch/arm/aarch32/mpu/arm_mpu.h
@@ -10,12 +10,13 @@
 	defined(CONFIG_CPU_CORTEX_M3) || \
 	defined(CONFIG_CPU_CORTEX_M4) || \
 	defined(CONFIG_CPU_CORTEX_M7) || \
-	defined(CONFIG_CPU_AARCH32_CORTEX_R)
+	defined(CONFIG_ARMV7_R)
 #include <zephyr/arch/arm/aarch32/mpu/arm_mpu_v7m.h>
 #elif defined(CONFIG_CPU_CORTEX_M23) || \
 	defined(CONFIG_CPU_CORTEX_M33) || \
-	defined(CONFIG_CPU_CORTEX_M55)
-#include <zephyr/arch/arm/aarch32/mpu/arm_mpu_v8m.h>
+	defined(CONFIG_CPU_CORTEX_M55) || \
+	defined(CONFIG_AARCH32_ARMV8_R)
+#include <zephyr/arch/arm/aarch32/mpu/arm_mpu_v8.h>
 #else
 #error "Unsupported ARM CPU"
 #endif
@@ -44,7 +45,7 @@
 	const struct arm_mpu_region *mpu_regions;
 };
 
-#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
+#if defined(CONFIG_ARMV7_R)
 #define MPU_REGION_ENTRY(_name, _base, _size, _attr) \
 	{\
 		.name = _name, \
diff --git a/include/zephyr/arch/arm/aarch32/mpu/arm_mpu_v8m.h b/include/zephyr/arch/arm/aarch32/mpu/arm_mpu_v8.h
similarity index 67%
rename from include/zephyr/arch/arm/aarch32/mpu/arm_mpu_v8m.h
rename to include/zephyr/arch/arm/aarch32/mpu/arm_mpu_v8.h
index 2701290..67a5a36 100644
--- a/include/zephyr/arch/arm/aarch32/mpu/arm_mpu_v8m.h
+++ b/include/zephyr/arch/arm/aarch32/mpu/arm_mpu_v8.h
@@ -7,12 +7,33 @@
 
 #ifndef _ASMLANGUAGE
 
-#include <zephyr/arch/arm/aarch32/cortex_m/cmsis.h>
-
 /* Convenience macros to represent the ARMv8-M-specific
  * configuration for memory access permission and
  * cache-ability attribution.
  */
+#if defined(CONFIG_AARCH32_ARMV8_R)
+#define MPU_IR_REGION_Msk       (0xFFU)
+#define MPU_IR_REGION_Pos       8U
+/* MPU RBAR Register attribute mask Definitions */
+#define MPU_RBAR_BASE_Pos       6U
+#define MPU_RBAR_BASE_Msk       (0x3FFFFFFFFFFFFFFUL << MPU_RBAR_BASE_Pos)
+#define MPU_RBAR_SH_Pos         3U
+#define MPU_RBAR_SH_Msk         (0x3UL << MPU_RBAR_SH_Pos)
+#define MPU_RBAR_AP_Pos         1U
+#define MPU_RBAR_AP_Msk         (0x3UL << MPU_RBAR_AP_Pos)
+/* RBAR XN */
+#define MPU_RBAR_XN_Pos         0U
+#define MPU_RBAR_XN_Msk         (0x1UL << MPU_RBAR_XN_Pos)
+
+/* MPU RLAR Register Definitions */
+#define MPU_RLAR_LIMIT_Pos      6U
+#define MPU_RLAR_LIMIT_Msk      (0x3FFFFFFFFFFFFFFUL << MPU_RLAR_LIMIT_Pos)
+#define MPU_RLAR_AttrIndx_Pos   1U
+#define MPU_RLAR_AttrIndx_Msk   (0x7UL << MPU_RLAR_AttrIndx_Pos)
+#define MPU_RLAR_EN_Msk         (0x1UL)
+#else
+#include <zephyr/arch/arm/aarch32/cortex_m/cmsis.h>
+#endif
 
 /* Privileged No Access, Unprivileged No Access */
 /*#define NO_ACCESS       0x0 */
@@ -60,8 +81,31 @@
 #define REGION_LIMIT_ADDR(base, size) \
 	(((base & MPU_RBAR_BASE_Msk) + size - 1) & MPU_RLAR_LIMIT_Msk)
 
-
 /* Attribute flags for cache-ability */
+#if defined(CONFIG_AARCH32_ARMV8_R)
+/* Memory Attributes for Device Memory
+ * 1. Gathering (G/nG)
+ *   Determines whether multiple accesses can be merged into a single
+ *   bus transaction.
+ *   nG: Number/size of accesses on the bus = number/size of accesses
+ *   in code.
+ *
+ * 2. Reordering (R/nR)
+ *   Determines whether accesses to the same device can be reordered.
+ *   nR: Accesses to the same IMPLEMENTATION DEFINED block size will
+ *   appear on the bus in program order.
+ *
+ * 3. Early Write Acknowledgment (E/nE)
+ *   Indicates to the memory system whether a buffer can send
+ *   acknowledgements.
+ *   nE: The response should come from the end slave, not buffering in
+ *   the interconnect.
+ */
+#define DEVICE_nGnRnE	0x0U
+#define DEVICE_nGnRE	0x4U
+#define DEVICE_nGRE	0x8U
+#define DEVICE_GRE	0xCU
+#endif
 
 /* Read/Write Allocation Configurations for Cacheable Memory */
 #define R_NON_W_NON     0x0 /* Do not allocate Read/Write */
@@ -109,15 +153,98 @@
 #define MPU_MAIR_ATTR_SRAM_NOCACHE  MPU_CACHE_ATTRIBUTES_SRAM_NOCACHE
 #define MPU_MAIR_INDEX_SRAM_NOCACHE 2
 
+#if defined(CONFIG_AARCH32_ARMV8_R)
+#define MPU_MAIR_ATTR_DEVICE        DEVICE_nGnRnE
+#define MPU_MAIR_INDEX_DEVICE       3
+/* Flash region(s): Attribute-0
+ * SRAM region(s): Attribute-1
+ * SRAM no cache-able region(s): Attribute-2
+ * DEVICE no cache-able region(s): Attribute-3
+ */
+#define MPU_MAIR_ATTRS							     \
+	((MPU_MAIR_ATTR_FLASH << (MPU_MAIR_INDEX_FLASH * 8)) |		     \
+	 (MPU_MAIR_ATTR_SRAM << (MPU_MAIR_INDEX_SRAM * 8)) |		     \
+	 (MPU_MAIR_ATTR_SRAM_NOCACHE << (MPU_MAIR_INDEX_SRAM_NOCACHE * 8)) | \
+	 (MPU_MAIR_ATTR_DEVICE << (MPU_MAIR_INDEX_DEVICE * 8)))
+#else
+/* Flash region(s): Attribute-0
+ * SRAM region(s): Attribute-1
+ * SRAM no cache-able region(s): Attribute-2
+ */
+#define MPU_MAIR_ATTRS								\
+	(((MPU_MAIR_ATTR_FLASH << MPU_MAIR0_Attr0_Pos) & MPU_MAIR0_Attr0_Msk) |	\
+	 ((MPU_MAIR_ATTR_SRAM << MPU_MAIR0_Attr1_Pos) & MPU_MAIR0_Attr1_Msk)  |	\
+	 ((MPU_MAIR_ATTR_SRAM_NOCACHE << MPU_MAIR0_Attr2_Pos) &			\
+	  MPU_MAIR0_Attr2_Msk))
+#endif
+
 /* Some helper defines for common regions.
  *
- * Note that the ARMv8-M MPU architecture requires that the
+ * Note that the ARMv8-M/R MPU architecture requires that the
  * enabled MPU regions are non-overlapping. Therefore, it is
  * recommended to use these helper defines only for configuring
  * fixed MPU regions at build-time (i.e. regions that are not
  * expected to be re-programmed or re-adjusted at run-time so
  * that they do not overlap with other MPU regions).
  */
+#if defined(CONFIG_AARCH32_ARMV8_R)
+#define REGION_RAM_ATTR(limit)						    \
+	{								    \
+		.rbar = NOT_EXEC |					    \
+			P_RW_U_NA_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
+		/* Cache-ability */					    \
+		.mair_idx = MPU_MAIR_INDEX_SRAM,			    \
+		.r_limit = limit - 1,  /* Region Limit */		    \
+	}
+
+#define REGION_RAM_TEXT_ATTR(limit)					    \
+	{								    \
+		.rbar = P_RO_U_RO_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
+		/* Cache-ability */					    \
+		.mair_idx = MPU_MAIR_INDEX_SRAM,			    \
+		.r_limit = limit - 1,  /* Region Limit */		    \
+	}
+
+#define REGION_RAM_RO_ATTR(limit)					    \
+	{								    \
+		.rbar = NOT_EXEC |					    \
+			P_RO_U_RO_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
+		/* Cache-ability */					    \
+		.mair_idx = MPU_MAIR_INDEX_SRAM,			    \
+		.r_limit = limit - 1,  /* Region Limit */		    \
+	}
+
+#if defined(CONFIG_MPU_ALLOW_FLASH_WRITE)
+/* Note that the access permissions allow for un-privileged writes, contrary
+ * to ARMv7-M where un-privileged code has Read-Only permissions.
+ */
+#define REGION_FLASH_ATTR(limit)					    \
+	{								    \
+		.rbar = P_RW_U_RW_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
+		/* Cache-ability */					    \
+		.mair_idx = MPU_MAIR_INDEX_FLASH,			    \
+		.r_limit = limit - 1,  /* Region Limit */		    \
+	}
+#else /* CONFIG_MPU_ALLOW_FLASH_WRITE */
+#define REGION_FLASH_ATTR(limit)				     \
+	{							     \
+		.rbar = RO_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
+		/* Cache-ability */				     \
+		.mair_idx = MPU_MAIR_INDEX_FLASH,		     \
+		.r_limit = limit - 1,  /* Region Limit */	     \
+	}
+#endif /* CONFIG_MPU_ALLOW_FLASH_WRITE */
+
+#define REGION_DEVICE_ATTR(limit)				      \
+	{							      \
+		/* AP, XN, SH */				      \
+		.rbar = NOT_EXEC | P_RW_U_NA_Msk | NON_SHAREABLE_Msk, \
+		/* Cache-ability */				      \
+		.mair_idx = MPU_MAIR_INDEX_DEVICE,		      \
+		/* Region Limit */				      \
+		.r_limit = limit - 1,				      \
+	}
+#else
 #define REGION_RAM_ATTR(base, size) \
 	{\
 		.rbar = NOT_EXEC | \
@@ -157,6 +284,7 @@
 	}
 #endif /* CONFIG_MPU_ALLOW_FLASH_WRITE */
 
+#endif
 
 struct arm_mpu_region_attr {
 	/* Attributes belonging to RBAR */