# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

menu "Virtual Memory Support"

# Hidden (no prompt) symbol; selected by arch/SoC Kconfigs (e.g. MMU below)
# to expose the KERNEL_VM_* options.
config KERNEL_VM_SUPPORT
	bool
	help
	  Hidden option to enable virtual memory Kconfigs.

if KERNEL_VM_SUPPORT

DT_CHOSEN_Z_SRAM := zephyr,sram

config KERNEL_VM_BASE
	hex "Virtual address space base address"
	default $(dt_chosen_reg_addr_hex,$(DT_CHOSEN_Z_SRAM))
	help
	  Define the base of the kernel's address space.

	  By default, this is the same as the DT_CHOSEN_Z_SRAM physical base SRAM
	  address from DTS, in which case RAM will be identity-mapped. Some
	  architectures may require RAM to be mapped in this way; they may have
	  just one RAM region and doing this makes linking much simpler, as
	  at least when the kernel boots all virtual RAM addresses are the same
	  as their physical address (demand paging at runtime may later modify
	  this for non-pinned page frames).

	  Otherwise, if RAM isn't identity-mapped:
	  1. It is the architecture's responsibility to transition the
	     instruction pointer to virtual addresses at early boot before
	     entering the kernel at z_cstart().
	  2. The underlying architecture may impose constraints on the bounds of
	     the kernel's address space, such as not overlapping physical RAM
	     regions if RAM is not identity-mapped, or the virtual and physical
	     base addresses being aligned to some common value (which allows
	     double-linking of paging structures to make the instruction pointer
	     transition simpler).

	  Zephyr does not implement a split address space and if multiple
	  page tables are in use, they all have the same virtual-to-physical
	  mappings (with potentially different permissions).

config KERNEL_VM_OFFSET
	hex "Kernel offset within address space"
	default 0
	help
	  Offset that the kernel image begins within its address space,
	  if this is not the same offset from the beginning of RAM.

	  Some care may need to be taken in selecting this value. In certain
	  build-time cases, or when a physical address cannot be looked up
	  in page tables, the equation:

	      virt = phys + ((KERNEL_VM_BASE + KERNEL_VM_OFFSET) -
	                     (SRAM_BASE_ADDRESS + SRAM_OFFSET))

	  will be used to convert between physical and virtual addresses for
	  memory that is mapped at boot.

	  This is uncommon and is only necessary if the beginning of VM and
	  physical memory have dissimilar alignment.

config KERNEL_VM_SIZE
	hex "Size of kernel address space in bytes"
	default 0x800000
	help
	  Size of the kernel's address space. Constraining this helps control
	  how much total memory can be used for page tables.

	  The difference between KERNEL_VM_BASE and KERNEL_VM_SIZE indicates the
	  size of the virtual region for runtime memory mappings. This is needed
	  for mapping driver MMIO regions, as well as special RAM mapping use-cases
	  such as VDSO pages, memory mapped thread stacks, and anonymous memory
	  mappings. The kernel itself will be mapped in here as well at boot.

	  Systems with very large amounts of memory (such as 512M or more)
	  will want to use a 64-bit build of Zephyr; there are no plans to
	  implement a notion of "high" memory in Zephyr to work around physical
	  RAM size larger than the defined bounds of the virtual address space.

endif # KERNEL_VM_SUPPORT

menuconfig MMU
	bool "MMU features"
	depends on CPU_HAS_MMU
	select KERNEL_VM_SUPPORT
	help
	  This option is enabled when the CPU's memory management unit is active
	  and the arch_mem_map() API is available.

if MMU

config MMU_PAGE_SIZE
	hex "Size of smallest granularity MMU page"
	default 0x1000
	help
	  Size of memory pages. Varies per MMU but 4K is common. For MMUs that
	  support multiple page sizes, put the smallest one here.

menuconfig DEMAND_PAGING
	bool "Demand paging [EXPERIMENTAL]"
	depends on ARCH_HAS_DEMAND_PAGING
	help
	  Enable demand paging. Requires architecture support in how the kernel
	  is linked and the implementation of an eviction algorithm and a
	  backing store for evicted pages.

if DEMAND_PAGING
config DEMAND_PAGING_ALLOW_IRQ
	bool "Allow interrupts during page-ins/outs"
	help
	  Allow interrupts to be serviced while pages are being evicted or
	  retrieved from the backing store. This is much better for system
	  latency, but any code running in interrupt context that page faults
	  will cause a kernel panic. Such code must work with exclusively pinned
	  code and data pages.

	  The scheduler is still disabled during this operation.

	  If this option is disabled, the page fault servicing logic
	  runs with interrupts disabled for the entire operation. However,
	  ISRs may also page fault.

config DEMAND_PAGING_PAGE_FRAMES_RESERVE
	int "Number of page frames reserved for paging"
	default 32 if !LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
	default 0
	help
	  This sets the number of page frames that will be reserved for
	  paging that do not count towards free memory. This is to
	  ensure that there are some page frames available for paging
	  code and data. Otherwise, it would be possible to exhaust
	  all page frames via anonymous memory mappings.

config DEMAND_PAGING_STATS
	bool "Gather Demand Paging Statistics"
	help
	  This enables gathering various statistics related to demand paging,
	  e.g. number of pagefaults. This is useful for tuning eviction
	  algorithms and optimizing backing store.

	  Should say N in production system as this is not without cost.

config DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
	bool "Use Timing Functions to Gather Demand Paging Statistics"
	select TIMING_FUNCTIONS_NEED_AT_BOOT
	help
	  Use timing functions to gather various demand paging statistics.

config DEMAND_PAGING_THREAD_STATS
	bool "Gather per Thread Demand Paging Statistics"
	depends on DEMAND_PAGING_STATS
	help
	  This enables gathering per thread statistics related to demand
	  paging.

	  Should say N in production system as this is not without cost.

config DEMAND_PAGING_TIMING_HISTOGRAM
	bool "Gather Demand Paging Execution Timing Histogram"
	depends on DEMAND_PAGING_STATS
	help
	  This gathers the histogram of execution time on page eviction
	  selection, and backing store page in and page out.

	  Should say N in production system as this is not without cost.

config DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS
	int "Number of bins (buckets) in Demand Paging Timing Histogram"
	depends on DEMAND_PAGING_TIMING_HISTOGRAM
	default 10
	help
	  Defines the number of bins (buckets) in the histogram used for
	  gathering execution timing information for demand paging.

	  This requires k_mem_paging_eviction_histogram_bounds[] and
	  k_mem_paging_backing_store_histogram_bounds[] to define
	  the upper bounds for each bin. See kernel/statistics.c for
	  information.

endif # DEMAND_PAGING
endif # MMU

endmenu # Virtual Memory Support