mmu: pin the whole kernel
This makes the demand paging implementation testable until the
critical set of pages that must remain resident is identified and
made known to the kernel.
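As a rough illustration of why pinning matters (a self-contained
sketch, not Zephyr's eviction code; page_frame, PF_PINNED, PF_MAPPED
and choose_victim are all hypothetical names), the eviction pass
simply skips any frame tagged as pinned, so pinning the whole image
leaves only anonymous mappings as candidates:

  #include <stddef.h>
  #include <stdint.h>

  #define PF_PINNED  (1U << 0)  /* frame may never be evicted */
  #define PF_MAPPED  (1U << 1)  /* frame currently backs a mapping */

  struct page_frame {
          uint32_t flags;
  };

  /* Pick an eviction candidate, skipping anything pinned. Returns
   * NULL when every mapped frame is pinned, which is why this patch
   * is only exercised with unpinned anonymous mappings.
   */
  static struct page_frame *choose_victim(struct page_frame *frames,
                                          size_t nframes)
  {
          for (size_t i = 0; i < nframes; i++) {
                  if ((frames[i].flags & PF_MAPPED) &&
                      !(frames[i].flags & PF_PINNED)) {
                          return &frames[i];
                  }
          }
          return NULL;
  }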
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
diff --git a/kernel/mmu.c b/kernel/mmu.c
index b1cf336..c1c8826 100644
--- a/kernel/mmu.c
+++ b/kernel/mmu.c
@@ -527,8 +527,20 @@
*/
VIRT_FOREACH(Z_KERNEL_VIRT_START, Z_KERNEL_VIRT_SIZE, addr)
{
- frame_mapped_set(z_phys_to_page_frame(BOOT_VIRT_TO_PHYS(addr)),
- addr);
+ pf = z_phys_to_page_frame(BOOT_VIRT_TO_PHYS(addr));
+ frame_mapped_set(pf, addr);
+
+ /* TODO: for now we pin the whole Zephyr image. Demand paging is
+ * currently exercised only with anonymously-mapped pages, which
+ * are never pinned.
+ *
+ * We will need to set up linker regions for the subset of kernel
+ * code/data pages which must stay pinned in memory and may never
+ * be evicted. These regions will contain critical CPU data
+ * structures and any code used to perform page fault handling,
+ * page-ins, etc.
+ */
+ pf->flags |= Z_PAGE_FRAME_PINNED;
}
/* Any remaining pages that aren't mapped, reserved, or pinned get
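The TODO above hints at the eventual shape of this loop. A sketch of
pinning only a linker-defined subset, reusing the helpers visible in
this patch (VIRT_FOREACH, z_phys_to_page_frame, BOOT_VIRT_TO_PHYS,
Z_PAGE_FRAME_PINNED); the linker symbols z_pinned_start/z_pinned_end
are assumptions, not part of this change:

  /* Hypothetical linker symbols bounding the always-resident region,
   * e.g. emitted by the linker script around the pinned sections.
   */
  extern char z_pinned_start[];
  extern char z_pinned_end[];

  static void pin_critical_pages(void)
  {
          uint8_t *addr;

          /* Only frames inside [z_pinned_start, z_pinned_end) are
           * pinned; the rest of the image becomes evictable.
           */
          VIRT_FOREACH(z_pinned_start,
                       (size_t)(z_pinned_end - z_pinned_start), addr)
          {
                  struct z_page_frame *pf =
                          z_phys_to_page_frame(BOOT_VIRT_TO_PHYS(addr));

                  pf->flags |= Z_PAGE_FRAME_PINNED;
          }
  }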