Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2016 Wind River Systems, Inc. |
| 3 | * |
David B. Kinder | ac74d8b | 2017-01-18 17:01:01 -0800 | [diff] [blame] | 4 | * SPDX-License-Identifier: Apache-2.0 |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 5 | */ |
| 6 | |
Gerard Marull-Paretas | cffefc8 | 2022-05-06 11:04:23 +0200 | [diff] [blame] | 7 | #include <zephyr/kernel.h> |
| 8 | #include <zephyr/kernel_structs.h> |
Anas Nashif | 4d994af | 2021-04-18 23:24:40 -0400 | [diff] [blame] | 9 | |
Gerard Marull-Paretas | cffefc8 | 2022-05-06 11:04:23 +0200 | [diff] [blame] | 10 | #include <zephyr/toolchain.h> |
| 11 | #include <zephyr/linker/sections.h> |
| 12 | #include <zephyr/wait_q.h> |
| 13 | #include <zephyr/sys/dlist.h> |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 14 | #include <ksched.h> |
Gerard Marull-Paretas | cffefc8 | 2022-05-06 11:04:23 +0200 | [diff] [blame] | 15 | #include <zephyr/init.h> |
| 16 | #include <zephyr/sys/check.h> |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 17 | |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 18 | /** |
| 19 | * @brief Initialize kernel memory slab subsystem. |
| 20 | * |
| 21 | * Perform any initialization of memory slabs that wasn't done at build time. |
| 22 | * Currently this just involves creating the list of free blocks for each slab. |
| 23 | * |
Daniel Leung | d3b030b | 2022-01-06 16:28:14 -0800 | [diff] [blame] | 24 | * @retval 0 on success. |
| 25 | * @retval -EINVAL if @p slab contains invalid configuration and/or values. |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 26 | */ |
Anas Nashif | dfc2bbc | 2019-06-16 09:22:21 -0400 | [diff] [blame] | 27 | static int create_free_list(struct k_mem_slab *slab) |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 28 | { |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 29 | uint32_t j; |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 30 | char *p; |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 31 | |
Nicolas Pitre | bc30f4f | 2019-05-21 21:15:00 -0400 | [diff] [blame] | 32 | /* blocks must be word aligned */ |
Anas Nashif | dfc2bbc | 2019-06-16 09:22:21 -0400 | [diff] [blame] | 33 | CHECKIF(((slab->block_size | (uintptr_t)slab->buffer) & |
Anas Nashif | bbbc38b | 2021-03-29 10:03:49 -0400 | [diff] [blame] | 34 | (sizeof(void *) - 1)) != 0U) { |
Anas Nashif | dfc2bbc | 2019-06-16 09:22:21 -0400 | [diff] [blame] | 35 | return -EINVAL; |
| 36 | } |
Nicolas Pitre | bc30f4f | 2019-05-21 21:15:00 -0400 | [diff] [blame] | 37 | |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 38 | slab->free_list = NULL; |
| 39 | p = slab->buffer; |
| 40 | |
Patrik Flykt | d0d9eb0 | 2018-11-29 11:13:40 -0800 | [diff] [blame] | 41 | for (j = 0U; j < slab->num_blocks; j++) { |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 42 | *(char **)p = slab->free_list; |
| 43 | slab->free_list = p; |
| 44 | p += slab->block_size; |
| 45 | } |
Anas Nashif | dfc2bbc | 2019-06-16 09:22:21 -0400 | [diff] [blame] | 46 | return 0; |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 47 | } |
| 48 | |
| 49 | /** |
| 50 | * @brief Complete initialization of statically defined memory slabs. |
| 51 | * |
| 52 | * Perform any initialization that wasn't done at build time. |
| 53 | * |
Daniel Leung | d3b030b | 2022-01-06 16:28:14 -0800 | [diff] [blame] | 54 | * @return 0 on success, fails otherwise. |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 55 | */ |
Tomasz Bursztyka | e18fcbb | 2020-04-30 20:33:38 +0200 | [diff] [blame] | 56 | static int init_mem_slab_module(const struct device *dev) |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 57 | { |
Anas Nashif | dfc2bbc | 2019-06-16 09:22:21 -0400 | [diff] [blame] | 58 | int rc = 0; |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 59 | ARG_UNUSED(dev); |
| 60 | |
Fabio Baltieri | f88a420 | 2021-08-04 23:05:54 +0100 | [diff] [blame] | 61 | STRUCT_SECTION_FOREACH(k_mem_slab, slab) { |
Anas Nashif | dfc2bbc | 2019-06-16 09:22:21 -0400 | [diff] [blame] | 62 | rc = create_free_list(slab); |
| 63 | if (rc < 0) { |
| 64 | goto out; |
| 65 | } |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 66 | z_object_init(slab); |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 67 | } |
Anas Nashif | dfc2bbc | 2019-06-16 09:22:21 -0400 | [diff] [blame] | 68 | |
| 69 | out: |
| 70 | return rc; |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 71 | } |
| 72 | |
Andrew Boie | 0b474ee | 2016-11-08 11:06:55 -0800 | [diff] [blame] | 73 | SYS_INIT(init_mem_slab_module, PRE_KERNEL_1, |
| 74 | CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); |
Allan Stephens | e7d2cc2 | 2016-10-19 16:10:46 -0500 | [diff] [blame] | 75 | |
Anas Nashif | dfc2bbc | 2019-06-16 09:22:21 -0400 | [diff] [blame] | 76 | int k_mem_slab_init(struct k_mem_slab *slab, void *buffer, |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 77 | size_t block_size, uint32_t num_blocks) |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 78 | { |
Anas Nashif | dfc2bbc | 2019-06-16 09:22:21 -0400 | [diff] [blame] | 79 | int rc = 0; |
| 80 | |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 81 | slab->num_blocks = num_blocks; |
| 82 | slab->block_size = block_size; |
| 83 | slab->buffer = buffer; |
Patrik Flykt | 24d7143 | 2019-03-26 19:57:45 -0600 | [diff] [blame] | 84 | slab->num_used = 0U; |
Nicolas Pitre | 2bed37e | 2021-04-13 11:10:22 -0400 | [diff] [blame] | 85 | slab->lock = (struct k_spinlock) {}; |
Kamil Lazowski | 104f100 | 2020-09-11 14:27:55 +0200 | [diff] [blame] | 86 | |
| 87 | #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION |
| 88 | slab->max_used = 0U; |
| 89 | #endif |
| 90 | |
Anas Nashif | dfc2bbc | 2019-06-16 09:22:21 -0400 | [diff] [blame] | 91 | rc = create_free_list(slab); |
| 92 | if (rc < 0) { |
| 93 | goto out; |
| 94 | } |
Anas Nashif | 4d994af | 2021-04-18 23:24:40 -0400 | [diff] [blame] | 95 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 96 | z_waitq_init(&slab->wait_q); |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 97 | z_object_init(slab); |
Anas Nashif | dfc2bbc | 2019-06-16 09:22:21 -0400 | [diff] [blame] | 98 | out: |
Torbjörn Leksell | 65b376e | 2021-03-26 14:03:23 +0100 | [diff] [blame] | 99 | SYS_PORT_TRACING_OBJ_INIT(k_mem_slab, slab, rc); |
| 100 | |
Anas Nashif | dfc2bbc | 2019-06-16 09:22:21 -0400 | [diff] [blame] | 101 | return rc; |
Benjamin Walsh | 7ef0f62 | 2016-10-24 17:04:43 -0400 | [diff] [blame] | 102 | } |
| 103 | |
/**
 * @brief Allocate a block from a memory slab.
 *
 * Pops the head of the free list if one is available. Otherwise, with
 * K_NO_WAIT (or without CONFIG_MULTITHREADING) it fails immediately with
 * -ENOMEM; otherwise the caller pends on the slab's wait queue until a
 * block is freed or @p timeout expires.
 *
 * @param slab    Slab to allocate from.
 * @param mem     Out: receives the block address, or NULL on -ENOMEM.
 * @param timeout How long to wait for a block to become available.
 *
 * @return 0 on success, -ENOMEM if no block and no wait, or the
 *         (negative) result of z_pend_curr() on wakeup/timeout.
 */
int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&slab->lock);
	int result;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, alloc, slab, timeout);

	if (slab->free_list != NULL) {
		/* take a free block: the head block's first word holds
		 * the next free block's address
		 */
		*mem = slab->free_list;
		slab->free_list = *(char **)(slab->free_list);
		slab->num_used++;

#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
		slab->max_used = MAX(slab->num_used, slab->max_used);
#endif

		result = 0;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
		   !IS_ENABLED(CONFIG_MULTITHREADING)) {
		/* don't wait for a free block to become available */
		*mem = NULL;
		result = -ENOMEM;
	} else {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mem_slab, alloc, slab, timeout);

		/* wait for a free block or timeout */
		result = z_pend_curr(&slab->lock, key, &slab->wait_q, timeout);
		if (result == 0) {
			/* k_mem_slab_free() handed us a block directly
			 * via the waker's swap_data
			 */
			*mem = _current->base.swap_data;
		}

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, alloc, slab, timeout, result);

		/* NOTE: no k_spin_unlock here — z_pend_curr() consumed
		 * the lock/key on this path
		 */
		return result;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, alloc, slab, timeout, result);

	k_spin_unlock(&slab->lock, key);

	return result;
}
| 147 | |
/**
 * @brief Free a block back to a memory slab.
 *
 * If a thread is pending on the slab (only possible when the free list is
 * empty), the block is handed to it directly and never touches the free
 * list; otherwise the block is pushed back onto the free list.
 *
 * @param slab Slab the block was allocated from.
 * @param mem  Address of the pointer to the block being freed.
 */
void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
{
	k_spinlock_key_t key = k_spin_lock(&slab->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, free, slab);
	if (slab->free_list == NULL && IS_ENABLED(CONFIG_MULTITHREADING)) {
		struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);

		if (pending_thread != NULL) {
			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);

			/* hand the block straight to the waiter via its
			 * swap_data; num_used is unchanged because the
			 * block stays allocated (new owner)
			 */
			z_thread_return_value_set_with_data(pending_thread, 0, *mem);
			z_ready_thread(pending_thread);
			/* z_reschedule() consumes the lock/key — no
			 * k_spin_unlock on this path
			 */
			z_reschedule(&slab->lock, key);
			return;
		}
	}
	/* push the block onto the free list: its first word becomes the
	 * "next" pointer
	 */
	**(char ***) mem = slab->free_list;
	slab->free_list = *(char **) mem;
	slab->num_used--;

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);

	k_spin_unlock(&slab->lock, key);
}
Peter Mitsis | 1244065 | 2022-07-06 10:52:15 -0400 | [diff] [blame] | 173 | |
| 174 | int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats) |
| 175 | { |
| 176 | if ((slab == NULL) || (stats == NULL)) { |
| 177 | return -EINVAL; |
| 178 | } |
| 179 | |
| 180 | k_spinlock_key_t key = k_spin_lock(&slab->lock); |
| 181 | |
| 182 | stats->allocated_bytes = slab->num_used * slab->block_size; |
| 183 | stats->free_bytes = (slab->num_blocks - slab->num_used) * slab->block_size; |
| 184 | #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION |
| 185 | stats->max_allocated_bytes = slab->max_used * slab->block_size; |
| 186 | #else |
| 187 | stats->max_allocated_bytes = 0; |
| 188 | #endif |
| 189 | |
| 190 | k_spin_unlock(&slab->lock, key); |
| 191 | |
| 192 | return 0; |
| 193 | } |
| 194 | |
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
/**
 * @brief Reset a slab's peak-utilization watermark.
 *
 * Sets the recorded maximum back to the current usage so future peaks
 * are measured from now.
 *
 * @param slab Slab whose watermark is reset.
 *
 * @retval 0 on success.
 * @retval -EINVAL if @p slab is NULL.
 */
int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab)
{
	k_spinlock_key_t lock_key;

	if (slab == NULL) {
		return -EINVAL;
	}

	lock_key = k_spin_lock(&slab->lock);
	slab->max_used = slab->num_used;
	k_spin_unlock(&slab->lock, lock_key);

	return 0;
}
#endif