/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H
#define ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H

#include <sys/util.h>
#include <toolchain.h>

/*
 * Caching mode definitions. These are mutually exclusive.
 */

/** No caching. Most drivers want this. */
#define K_MEM_CACHE_NONE	2

/** Write-through caching. Used by certain drivers. */
#define K_MEM_CACHE_WT		1

/** Full write-back caching. Any mapped RAM wants this. */
#define K_MEM_CACHE_WB		0

/** Reserved bits for cache modes in k_mem_map() flags argument */
#define K_MEM_CACHE_MASK	(BIT(3) - 1)

/*
 * Region permission attributes. Default is read-only, no user, no exec
 */

/** Region will have read/write access (and not read-only) */
#define K_MEM_PERM_RW		BIT(3)

/** Region will be executable (normally forbidden) */
#define K_MEM_PERM_EXEC		BIT(4)

/** Region will be accessible to user mode (normally supervisor-only) */
#define K_MEM_PERM_USER		BIT(5)

/*
 * This is the offset to subtract from a virtual address mapped in the
 * kernel's permanent mapping of RAM, to obtain its physical address.
 *
 *     virt_addr = phys_addr + Z_MEM_VM_OFFSET
 *
 * This only works for virtual addresses within the interval
 * [CONFIG_KERNEL_VM_BASE, CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024)).
 *
 * These macros are intended for assembly, linker code, and static initializers.
 * Use with care.
 *
 * Note that when demand paging is active, these will only work with page
 * frames that are pinned to their virtual mapping at boot.
 *
 * TODO: This will likely need to move to an arch API or need additional
 * constraints defined.
 */
#ifdef CONFIG_MMU
#define Z_MEM_VM_OFFSET	((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
			 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
#else
#define Z_MEM_VM_OFFSET	0
#endif

#define Z_MEM_PHYS_ADDR(virt)	((virt) - Z_MEM_VM_OFFSET)
#define Z_MEM_VIRT_ADDR(phys)	((phys) + Z_MEM_VM_OFFSET)
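
/*
 * Example (illustrative sketch, not part of the original header): using
 * these macros from C in a static initializer. The buffer and its use by
 * a DMA engine are assumptions for illustration only.
 *
 *	static uint8_t dma_buf[4096] __aligned(4096);
 *
 *	// Physical address of the buffer, e.g. for programming a DMA
 *	// controller that knows nothing of the kernel's virtual mappings:
 *	static const uintptr_t dma_buf_phys =
 *		Z_MEM_PHYS_ADDR((uintptr_t)dma_buf);
 */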

#if Z_MEM_VM_OFFSET != 0
#define Z_VM_KERNEL 1
#ifdef CONFIG_XIP
#error "XIP and a virtual memory kernel are incompatible"
#endif
#endif

#ifndef _ASMLANGUAGE
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>
#include <sys/__assert.h>

struct k_mem_paging_stats_t {
#ifdef CONFIG_DEMAND_PAGING_STATS
	struct {
		/** Number of page faults */
		unsigned long cnt;

		/** Number of page faults with IRQ locked */
		unsigned long irq_locked;

		/** Number of page faults with IRQ unlocked */
		unsigned long irq_unlocked;

#ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
		/** Number of page faults while in ISR */
		unsigned long in_isr;
#endif
	} pagefaults;

	struct {
		/** Number of clean pages selected for eviction */
		unsigned long clean;

		/** Number of dirty pages selected for eviction */
		unsigned long dirty;
	} eviction;
#endif /* CONFIG_DEMAND_PAGING_STATS */
};

struct k_mem_paging_histogram_t {
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	/* Counts for each bin in timing histogram */
	unsigned long counts[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];

	/* Bounds for the bins in timing histogram,
	 * excluding the first and last (hence, NUM_BINS - 1
	 * meaningful entries).
	 */
	unsigned long bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
};

/* Just like Z_MEM_PHYS_ADDR() but with type safety and assertions */
static inline uintptr_t z_mem_phys_addr(void *virt)
{
	uintptr_t addr = (uintptr_t)virt;

#ifdef CONFIG_MMU
	__ASSERT((addr >= CONFIG_KERNEL_VM_BASE) &&
		 (addr < (CONFIG_KERNEL_VM_BASE +
			  (CONFIG_KERNEL_VM_SIZE))),
		 "address %p not in permanent mappings", virt);
#else
	/* Should be identity-mapped */
	__ASSERT((addr >= CONFIG_SRAM_BASE_ADDRESS) &&
		 (addr < (CONFIG_SRAM_BASE_ADDRESS +
			  (CONFIG_SRAM_SIZE * 1024UL))),
		 "physical address 0x%lx not in RAM",
		 (unsigned long)addr);
#endif /* CONFIG_MMU */

	/* TODO add assertion that this page is pinned to boot mapping,
	 * the above checks won't be sufficient with demand paging
	 */

	return Z_MEM_PHYS_ADDR(addr);
}

/* Just like Z_MEM_VIRT_ADDR() but with type safety and assertions */
static inline void *z_mem_virt_addr(uintptr_t phys)
{
	__ASSERT((phys >= CONFIG_SRAM_BASE_ADDRESS) &&
		 (phys < (CONFIG_SRAM_BASE_ADDRESS +
			  (CONFIG_SRAM_SIZE * 1024UL))),
		 "physical address 0x%lx not in RAM", (unsigned long)phys);

	/* TODO add assertion that this page frame is pinned to boot mapping,
	 * the above check won't be sufficient with demand paging
	 */

	return (void *)Z_MEM_VIRT_ADDR(phys);
}

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Map a physical memory region into the kernel's virtual address space
 *
 * This function is intended for mapping memory-mapped I/O regions into
 * the virtual address space. Given a physical address and a size, return a
 * linear address representing the base of where the physical region is mapped
 * in the virtual address space for the Zephyr kernel.
 *
 * This function alters the active page tables in the area reserved
 * for the kernel. This function will choose the virtual address
 * and return it to the caller.
 *
 * Portable code should never assume that phys_addr and linear_addr will
 * be equal.
 *
 * Caching and access properties are controlled by the 'flags' parameter.
 * Unused bits in 'flags' are reserved for future expansion.
 * A caching mode must be selected. By default, the region is read-only,
 * with user access and code execution forbidden. This policy is changed
 * by passing K_MEM_CACHE_* and K_MEM_PERM_* macros in the 'flags' parameter.
 *
 * If there is insufficient virtual address space for the mapping this will
 * generate a kernel panic.
 *
 * This API is only available if CONFIG_MMU is enabled.
 *
 * It is highly discouraged to use this function to map system RAM page
 * frames. It may conflict with anonymous memory mappings and demand paging
 * and produce undefined behavior. Do not use this for RAM unless you know
 * exactly what you are doing. If you need a chunk of memory, use k_mem_map().
 * If you need a contiguous buffer of physical memory, statically declare it
 * and pin it at build time; it will be mapped when the system boots.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt_ptr [out] Output virtual address storage location
 * @param phys Physical address base of the memory region
 * @param size Size of the memory region
 * @param flags Caching mode and access flags, see K_MEM_CACHE_* and
 *              K_MEM_PERM_* macros
 */
void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
		uint32_t flags);
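
/*
 * Example (illustrative sketch; the device base address and size are
 * assumptions, not values from this header): mapping a hypothetical MMIO
 * region in a driver.
 *
 *	#define MY_DEV_BASE_PHYS	0xE0000000UL	// hypothetical
 *	#define MY_DEV_MMIO_SIZE	0x1000UL	// hypothetical
 *
 *	uint8_t *regs;
 *
 *	z_phys_map(&regs, MY_DEV_BASE_PHYS, MY_DEV_MMIO_SIZE,
 *		   K_MEM_CACHE_NONE | K_MEM_PERM_RW);
 *	// 'regs' now points at the mapped registers; the kernel chose the
 *	// virtual address. Unmap with z_phys_unmap(regs, MY_DEV_MMIO_SIZE)
 *	// when no longer needed.
 */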

/**
 * Unmap a virtual memory region from the kernel's virtual address space.
 *
 * This function is intended to be used by drivers and early boot routines
 * where temporary memory mappings need to be made. This allows these
 * memory mappings to be discarded once they are no longer needed.
 *
 * This function alters the active page tables in the area reserved
 * for the kernel.
 *
 * This will align the input parameters to page boundaries so that
 * this can be used with the virtual address as returned by
 * z_phys_map().
 *
 * This API is only available if CONFIG_MMU is enabled.
 *
 * It is highly discouraged to use this function to unmap memory mappings.
 * It may conflict with anonymous memory mappings and demand paging and
 * produce undefined behavior. Do not use this unless you know exactly
 * what you are doing.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt Starting address of the virtual address region to be unmapped.
 * @param size Size of the virtual address region
 */
void z_phys_unmap(uint8_t *virt, size_t size);

/*
 * k_mem_map() control flags
 */

/**
 * @def K_MEM_MAP_UNINIT
 *
 * @brief The mapped region is not guaranteed to be zeroed.
 *
 * This may improve performance. The associated page frames may contain
 * indeterminate data, zeroes, or even sensitive information.
 *
 * This may not be used with K_MEM_PERM_USER as there are no circumstances
 * where this is safe.
 */
#define K_MEM_MAP_UNINIT	BIT(16)

/**
 * @def K_MEM_MAP_LOCK
 *
 * Region will be pinned in memory and never paged
 *
 * Such memory is guaranteed to never produce a page fault due to page-outs
 * or copy-on-write once the mapping call has returned. Physical page frames
 * will be pre-fetched as necessary and pinned.
 */
#define K_MEM_MAP_LOCK		BIT(17)

/**
 * @def K_MEM_MAP_GUARD
 *
 * An un-mapped virtual guard page will be placed in memory immediately
 * preceding the mapped region. This page will still be noted as being used
 * by the virtual memory manager. The total size of the allocation will be
 * the requested size plus the size of this guard page. The returned address
 * pointer will not include the guard page immediately below it. The typical
 * use-case is downward-growing thread stacks.
 *
 * Zephyr treats page faults on this guard page as a fatal K_ERR_STACK_CHK_FAIL
 * if it determines it immediately precedes a stack buffer; this is
 * implemented in the architecture layer.
 *
 * DEPRECATED: k_mem_map() will always allocate guard pages, so this bit
 * no longer has any effect.
 */
#define K_MEM_MAP_GUARD		__DEPRECATED_MACRO BIT(18)

/**
 * Return the amount of free memory available
 *
 * The returned value will reflect how many free RAM page frames are available.
 * If demand paging is enabled, it may still be possible to allocate more
 * memory than this value indicates, as data pages can be paged out to the
 * backing store.
 *
 * The information reported by this function may go stale immediately if
 * concurrent memory mappings or page-ins take place.
 *
 * @return Free physical RAM, in bytes
 */
size_t k_mem_free_get(void);

/**
 * Map anonymous memory into Zephyr's address space
 *
 * This function effectively increases the data space available to Zephyr.
 * The kernel will choose a base virtual address and return it to the caller.
 * The memory will have access permissions for all contexts set per the
 * provided flags argument.
 *
 * If user thread access control needs to be managed in any way, do not enable
 * the K_MEM_PERM_USER flag here; instead manage the region's permissions
 * with memory domain APIs after the mapping has been established. Setting
 * K_MEM_PERM_USER here will allow all user threads to access this memory,
 * which is usually undesirable.
 *
 * Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
 *
 * The mapped region is not guaranteed to be physically contiguous in memory.
 * Physically contiguous buffers should be allocated statically and pinned
 * at build time.
 *
 * Pages mapped in this way have write-back cache settings.
 *
 * The returned virtual memory pointer will be page-aligned. The size
 * parameter, and any base address used for re-mapping purposes, must be
 * page-aligned.
 *
 * Note that the allocation includes two guard pages immediately before
 * and after the requested region. The total size of the allocation will be
 * the requested size plus the size of these two guard pages.
 *
 * Several K_MEM_MAP_* flags alter the behavior of this function; see the
 * documentation of these flags for details.
 *
 * @param size Size of the memory mapping. This must be page-aligned.
 * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
 * @return The mapped memory location, or NULL if insufficient virtual address
 *         space, insufficient physical memory to establish the mapping,
 *         or insufficient memory for paging structures.
 */
void *k_mem_map(size_t size, uint32_t flags);
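
/*
 * Example (illustrative sketch): mapping one page of zeroed anonymous
 * memory and handling failure. CONFIG_MMU_PAGE_SIZE is assumed to be the
 * page size here.
 *
 *	void *page = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
 *
 *	if (page == NULL) {
 *		// Out of virtual address space, physical memory, or
 *		// memory for paging structures.
 *	}
 *	// ... use the zeroed page ...
 *	k_mem_unmap(page, CONFIG_MMU_PAGE_SIZE);
 */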

/**
 * Un-map mapped memory
 *
 * This removes a memory mapping for the provided page-aligned region.
 * Associated page frames will be freed and the kernel may re-use the
 * associated virtual address region. Any paged out data pages may be
 * discarded.
 *
 * Calling this function on a region which was not mapped to begin with is
 * undefined behavior.
 *
 * @param addr Page-aligned memory region base virtual address
 * @param size Page-aligned memory region size
 */
void k_mem_unmap(void *addr, size_t size);

/**
 * Given an arbitrary region, provide an aligned region that covers it
 *
 * The returned region will have both its base address and size aligned
 * to the provided alignment value.
 *
 * @param aligned_addr [out] Aligned address
 * @param aligned_size [out] Aligned region size
 * @param addr Region base address
 * @param size Region size
 * @param align What to align the address and size to
 * @return The offset between aligned_addr and addr
 */
size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
			  uintptr_t addr, size_t size, size_t align);
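
/*
 * Example (illustrative): expanding an arbitrary buffer to page
 * granularity before handing it to a page-based API. The 4096-byte page
 * size and the 'buf'/'buf_size' variables are assumptions for
 * illustration.
 *
 *	uintptr_t aligned_addr;
 *	size_t aligned_size;
 *	size_t offset;
 *
 *	offset = k_mem_region_align(&aligned_addr, &aligned_size,
 *				    (uintptr_t)buf, buf_size, 4096);
 *	// [aligned_addr, aligned_addr + aligned_size) now covers
 *	// [buf, buf + buf_size) with both bounds 4096-aligned, and
 *	// offset == (uintptr_t)buf - aligned_addr.
 */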

/**
 * @defgroup mem-demand-paging Demand Paging APIs
 * @{
 */

/**
 * Evict a page-aligned virtual memory region to the backing store
 *
 * Useful if it is known that a memory region will not be used for some time.
 * All the data pages within the specified region will be evicted to the
 * backing store if they weren't already, with their associated page frames
 * marked as available for mappings or page-ins.
 *
 * None of the associated page frames mapped to the provided region should
 * be pinned.
 *
 * Note that there is no guarantee how long these pages will stay evicted;
 * they could take page faults immediately.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 * @retval 0 Success
 * @retval -ENOMEM Insufficient space in backing store to satisfy request.
 *         The region may be partially paged out.
 */
int k_mem_page_out(void *addr, size_t size);

/**
 * Load a virtual data region into memory
 *
 * After the function completes, all the page frames associated with this
 * region will be paged in. However, they are not guaranteed to stay there.
 * This is useful if the region is known to be used soon.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_page_in(void *addr, size_t size);

/**
 * Pin an aligned virtual data region, paging in as necessary
 *
 * After the function completes, all the page frames associated with this
 * region will be resident in memory and pinned such that they stay that way.
 * This is a stronger version of k_mem_page_in().
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_pin(void *addr, size_t size);

/**
 * Un-pin an aligned virtual data region
 *
 * After the function completes, all the page frames associated with this
 * region will no longer be marked as pinned. This does not evict the region;
 * follow this with k_mem_page_out() if you need that.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_unpin(void *addr, size_t size);
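
/*
 * Example (illustrative sketch): a pin/unpin lifecycle around a
 * latency-sensitive operation. 'region' and 'region_size' are assumed
 * page-aligned values supplied by the caller.
 *
 *	k_mem_pin(region, region_size);	// page in and pin; no faults after
 *	// ... time-critical work on 'region' ...
 *	k_mem_unpin(region, region_size);
 *	k_mem_page_out(region, region_size);	// optional: evict it now
 */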

/**
 * Get the paging statistics since system startup
 *
 * This populates the paging statistics struct being passed in
 * as an argument.
 *
 * @param[in,out] stats Paging statistics struct to be filled.
 */
__syscall void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats);
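
/*
 * Example (illustrative): logging the system-wide paging counters. The
 * statistics fields are only populated when CONFIG_DEMAND_PAGING_STATS
 * is enabled.
 *
 *	struct k_mem_paging_stats_t stats;
 *
 *	k_mem_paging_stats_get(&stats);
 *	printk("page faults: %lu (clean evictions: %lu, dirty: %lu)\n",
 *	       stats.pagefaults.cnt, stats.eviction.clean,
 *	       stats.eviction.dirty);
 */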

struct k_thread;
/**
 * Get the paging statistics since system startup for a thread
 *
 * This populates the paging statistics struct being passed in
 * as an argument for a particular thread.
 *
 * @param[in] thread Thread
 * @param[in,out] stats Paging statistics struct to be filled.
 */
__syscall
void k_mem_paging_thread_stats_get(struct k_thread *thread,
				   struct k_mem_paging_stats_t *stats);

/**
 * Get the eviction timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as an argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_eviction_get(
	struct k_mem_paging_histogram_t *hist);

/**
 * Get the backing store page-in timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as an argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_backing_store_page_in_get(
	struct k_mem_paging_histogram_t *hist);

/**
 * Get the backing store page-out timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as an argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_backing_store_page_out_get(
	struct k_mem_paging_histogram_t *hist);
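
/*
 * Example (illustrative): dumping the eviction timing histogram. Only
 * meaningful when CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM is enabled.
 *
 *	struct k_mem_paging_histogram_t hist;
 *
 *	k_mem_paging_histogram_eviction_get(&hist);
 *	for (int i = 0;
 *	     i < CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS; i++) {
 *		printk("bin %d: %lu\n", i, hist.counts[i]);
 *	}
 */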

#include <syscalls/mem_manage.h>

/** @} */

/**
 * Eviction algorithm APIs
 *
 * @defgroup mem-demand-paging-eviction Eviction Algorithm APIs
 * @{
 */

/**
 * Select a page frame for eviction
 *
 * The kernel will invoke this to choose a page frame to evict if there
 * are no free page frames.
 *
 * This function will never be called before the initial
 * k_mem_paging_eviction_init().
 *
 * This function is invoked with interrupts locked.
 *
 * @param [out] dirty Whether the page to evict is dirty
 * @return The page frame to evict
 */
struct z_page_frame *k_mem_paging_eviction_select(bool *dirty);

/**
 * Initialization function
 *
 * Called at POST_KERNEL to perform any necessary initialization tasks for the
 * eviction algorithm. k_mem_paging_eviction_select() is guaranteed to never be
 * called until this has returned, and this will only be called once.
 */
void k_mem_paging_eviction_init(void);
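
/*
 * Illustrative shape of a minimal eviction implementation (a sketch only,
 * not the algorithm Zephyr ships; z_page_frames[] and Z_NUM_PAGE_FRAMES
 * are kernel internals assumed here, and a real implementation must skip
 * pinned/reserved frames and report dirtiness accurately):
 *
 *	static uint32_t next_frame;
 *
 *	struct z_page_frame *k_mem_paging_eviction_select(bool *dirty)
 *	{
 *		struct z_page_frame *pf = &z_page_frames[next_frame];
 *
 *		next_frame = (next_frame + 1) % Z_NUM_PAGE_FRAMES;
 *		*dirty = true;	// pessimistic: always write back
 *		return pf;
 *	}
 *
 *	void k_mem_paging_eviction_init(void)
 *	{
 *		next_frame = 0;
 *	}
 */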

/** @} */

/**
 * Backing store APIs
 *
 * @defgroup mem-demand-paging-backing-store Backing Store APIs
 * @{
 */

/**
 * Reserve or fetch a storage location for a data page loaded into a page frame
 *
 * The returned location token must be unique to the mapped virtual address.
 * This location will be used in the backing store to page out data page
 * contents for later retrieval. The location value must be page-aligned.
 *
 * This function may be called multiple times on the same data page. If its
 * page frame has its Z_PAGE_FRAME_BACKED bit set, it is expected to return
 * the previous backing store location for the data page containing a cached
 * clean copy. This clean copy may be updated on page-out, or used to
 * discard clean pages without needing to write out their contents.
 *
 * If the backing store is full, some other backing store location which caches
 * a loaded data page may be selected, in which case its associated page frame
 * will have the Z_PAGE_FRAME_BACKED bit cleared (as it is no longer cached).
 *
 * pf->addr will indicate the virtual address the page is currently mapped to.
 * Large, sparse backing stores which can contain the entire address space
 * may simply generate location tokens purely as a function of pf->addr with no
 * other management necessary.
 *
 * This function distinguishes whether it was called on behalf of a page
 * fault. A free backing store location must always be reserved in order for
 * page faults to succeed. If the page_fault parameter is not set, this
 * function should return -ENOMEM even if one location is available.
 *
 * This function is invoked with interrupts locked.
 *
 * @param pf Page frame to obtain a storage location for
 * @param [out] location Storage location token
 * @param page_fault Whether this request was for a page fault
 * @retval 0 Success
 * @retval -ENOMEM Backing store is full
 */
int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
					    uintptr_t *location,
					    bool page_fault);
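
/*
 * Illustrative sketch of the "large, sparse backing store" case described
 * above: the location token is derived directly from the mapped virtual
 * address, with no free-list management (this assumes the store can hold
 * the entire address space, so page faults can always be satisfied):
 *
 *	int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
 *						    uintptr_t *location,
 *						    bool page_fault)
 *	{
 *		ARG_UNUSED(page_fault);
 *
 *		// pf->addr is the page-aligned virtual address this page
 *		// frame is currently mapped to.
 *		*location = (uintptr_t)pf->addr;
 *		return 0;
 *	}
 */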

/**
 * Free a backing store location
 *
 * Any stored data may be discarded, and the location token associated with
 * this address may be re-used for some other data page.
 *
 * This function is invoked with interrupts locked.
 *
 * @param location Location token to free
 */
void k_mem_paging_backing_store_location_free(uintptr_t location);

/**
 * Copy a data page from Z_SCRATCH_PAGE to the specified location
 *
 * Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
 * to the intended source page frame for the calling context.
 *
 * Calls to this and k_mem_paging_backing_store_page_in() will always be
 * serialized, but interrupts may be enabled.
 *
 * @param location Location token for the data page, for later retrieval
 */
void k_mem_paging_backing_store_page_out(uintptr_t location);

/**
 * Copy a data page from the provided location to Z_SCRATCH_PAGE
 *
 * Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
 * to the intended destination page frame for the calling context.
 *
 * Calls to this and k_mem_paging_backing_store_page_out() will always be
 * serialized, but interrupts may be enabled.
 *
 * @param location Location token for the data page
 */
void k_mem_paging_backing_store_page_in(uintptr_t location);

/**
 * Update internal accounting after a page-in
 *
 * This is invoked after k_mem_paging_backing_store_page_in() and interrupts
 * have been re-locked, making it safe to access the z_page_frame data.
 * The location value will be the same passed to
 * k_mem_paging_backing_store_page_in().
 *
 * The primary use-case for this is to update custom fields for the backing
 * store in the page frame, to reflect where the data should be evicted to
 * if it is paged out again. This may be a no-op in some implementations.
 *
 * If the backing store caches paged-in data pages, this is the appropriate
 * time to set the Z_PAGE_FRAME_BACKED bit. The kernel only skips paging
 * out clean data pages if they are noted as clean in the page tables and the
 * Z_PAGE_FRAME_BACKED bit is set in their associated page frame.
 *
 * @param pf Page frame that was loaded in
 * @param location Location of where the loaded data page was retrieved
 */
void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
					      uintptr_t location);

/**
 * Backing store initialization function.
 *
 * The implementation may expect to receive page in/out calls as soon as this
 * returns, but not before that. Called at POST_KERNEL.
 *
 * This function is expected to do two things:
 * - Initialize any internal data structures and accounting for the backing
 *   store.
 * - If the backing store already contains all or some loaded kernel data pages
 *   at boot time, Z_PAGE_FRAME_BACKED should be appropriately set for their
 *   associated page frames, and any internal accounting set up appropriately.
 */
void k_mem_paging_backing_store_init(void);

/** @} */

#ifdef __cplusplus
}
#endif

#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H */