blob: 28f5c471bd85d2cfc405e05015c08660f0d56cf3 [file] [log] [blame]
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04001/*
2 * Copyright (c) 2016 Wind River Systems, Inc.
3 *
David B. Kinderac74d8b2017-01-18 17:01:01 -08004 * SPDX-License-Identifier: Apache-2.0
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04005 */
6
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +02007#include <zephyr/kernel.h>
8#include <zephyr/kernel_structs.h>
Anas Nashif4d994af2021-04-18 23:24:40 -04009
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020010#include <zephyr/toolchain.h>
11#include <zephyr/linker/sections.h>
12#include <zephyr/wait_q.h>
13#include <zephyr/sys/dlist.h>
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040014#include <ksched.h>
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020015#include <zephyr/init.h>
16#include <zephyr/sys/check.h>
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040017
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040018/**
19 * @brief Initialize kernel memory slab subsystem.
20 *
21 * Perform any initialization of memory slabs that wasn't done at build time.
22 * Currently this just involves creating the list of free blocks for each slab.
23 *
Daniel Leungd3b030b2022-01-06 16:28:14 -080024 * @retval 0 on success.
25 * @retval -EINVAL if @p slab contains invalid configuration and/or values.
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040026 */
Anas Nashifdfc2bbc2019-06-16 09:22:21 -040027static int create_free_list(struct k_mem_slab *slab)
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040028{
Kumar Galaa1b77fd2020-05-27 11:26:57 -050029 uint32_t j;
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040030 char *p;
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040031
Nicolas Pitrebc30f4f2019-05-21 21:15:00 -040032 /* blocks must be word aligned */
Anas Nashifdfc2bbc2019-06-16 09:22:21 -040033 CHECKIF(((slab->block_size | (uintptr_t)slab->buffer) &
Anas Nashifbbbc38b2021-03-29 10:03:49 -040034 (sizeof(void *) - 1)) != 0U) {
Anas Nashifdfc2bbc2019-06-16 09:22:21 -040035 return -EINVAL;
36 }
Nicolas Pitrebc30f4f2019-05-21 21:15:00 -040037
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040038 slab->free_list = NULL;
39 p = slab->buffer;
40
Patrik Flyktd0d9eb02018-11-29 11:13:40 -080041 for (j = 0U; j < slab->num_blocks; j++) {
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040042 *(char **)p = slab->free_list;
43 slab->free_list = p;
44 p += slab->block_size;
45 }
Anas Nashifdfc2bbc2019-06-16 09:22:21 -040046 return 0;
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040047}
48
49/**
50 * @brief Complete initialization of statically defined memory slabs.
51 *
52 * Perform any initialization that wasn't done at build time.
53 *
Daniel Leungd3b030b2022-01-06 16:28:14 -080054 * @return 0 on success, fails otherwise.
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040055 */
Tomasz Bursztykae18fcbb2020-04-30 20:33:38 +020056static int init_mem_slab_module(const struct device *dev)
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040057{
Anas Nashifdfc2bbc2019-06-16 09:22:21 -040058 int rc = 0;
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040059 ARG_UNUSED(dev);
60
Fabio Baltierif88a4202021-08-04 23:05:54 +010061 STRUCT_SECTION_FOREACH(k_mem_slab, slab) {
Anas Nashifdfc2bbc2019-06-16 09:22:21 -040062 rc = create_free_list(slab);
63 if (rc < 0) {
64 goto out;
65 }
Patrik Flykt4344e272019-03-08 14:19:05 -070066 z_object_init(slab);
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040067 }
Anas Nashifdfc2bbc2019-06-16 09:22:21 -040068
69out:
70 return rc;
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040071}
72
Andrew Boie0b474ee2016-11-08 11:06:55 -080073SYS_INIT(init_mem_slab_module, PRE_KERNEL_1,
74 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
Allan Stephense7d2cc22016-10-19 16:10:46 -050075
Anas Nashifdfc2bbc2019-06-16 09:22:21 -040076int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
Kumar Galaa1b77fd2020-05-27 11:26:57 -050077 size_t block_size, uint32_t num_blocks)
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040078{
Anas Nashifdfc2bbc2019-06-16 09:22:21 -040079 int rc = 0;
80
Benjamin Walsh7ef0f622016-10-24 17:04:43 -040081 slab->num_blocks = num_blocks;
82 slab->block_size = block_size;
83 slab->buffer = buffer;
Patrik Flykt24d71432019-03-26 19:57:45 -060084 slab->num_used = 0U;
Nicolas Pitre2bed37e2021-04-13 11:10:22 -040085 slab->lock = (struct k_spinlock) {};
Kamil Lazowski104f1002020-09-11 14:27:55 +020086
87#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
88 slab->max_used = 0U;
89#endif
90
Anas Nashifdfc2bbc2019-06-16 09:22:21 -040091 rc = create_free_list(slab);
92 if (rc < 0) {
93 goto out;
94 }
Anas Nashif4d994af2021-04-18 23:24:40 -040095
Patrik Flykt4344e272019-03-08 14:19:05 -070096 z_waitq_init(&slab->wait_q);
Patrik Flykt4344e272019-03-08 14:19:05 -070097 z_object_init(slab);
Anas Nashifdfc2bbc2019-06-16 09:22:21 -040098out:
Torbjörn Leksell65b376e2021-03-26 14:03:23 +010099 SYS_PORT_TRACING_OBJ_INIT(k_mem_slab, slab, rc);
100
Anas Nashifdfc2bbc2019-06-16 09:22:21 -0400101 return rc;
Benjamin Walsh7ef0f622016-10-24 17:04:43 -0400102}
103
/**
 * @brief Allocate one block from a memory slab.
 *
 * If a free block is available it is handed out immediately. Otherwise,
 * depending on @p timeout and whether multithreading is enabled, the call
 * either fails with -ENOMEM or pends the current thread until a block is
 * freed or the timeout expires.
 *
 * @param slab Slab to allocate from.
 * @param mem Output: receives the block address on success, NULL on
 *            an immediate -ENOMEM failure.
 * @param timeout How long to wait for a block (K_NO_WAIT for none).
 *
 * @retval 0 on success, with *mem set to the block.
 * @retval -ENOMEM if no block is free and no wait was requested.
 * @retval otherwise, the (nonzero) result of z_pend_curr() on timeout.
 */
int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&slab->lock);
	int result;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, alloc, slab, timeout);

	if (slab->free_list != NULL) {
		/* take a free block: pop the head of the free list; its
		 * first word holds the next free block's address.
		 */
		*mem = slab->free_list;
		slab->free_list = *(char **)(slab->free_list);
		slab->num_used++;

#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
		/* track the high-water mark of blocks in use */
		slab->max_used = MAX(slab->num_used, slab->max_used);
#endif

		result = 0;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
		   !IS_ENABLED(CONFIG_MULTITHREADING)) {
		/* don't wait for a free block to become available */
		*mem = NULL;
		result = -ENOMEM;
	} else {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mem_slab, alloc, slab, timeout);

		/* wait for a free block or timeout; z_pend_curr() takes
		 * ownership of the lock/key, so this path does not (and
		 * must not) call k_spin_unlock() itself.
		 */
		result = z_pend_curr(&slab->lock, key, &slab->wait_q, timeout);
		if (result == 0) {
			/* the freeing thread stashed the block address in
			 * our swap_data (see k_mem_slab_free()).
			 */
			*mem = _current->base.swap_data;
		}

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, alloc, slab, timeout, result);

		return result;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, alloc, slab, timeout, result);

	k_spin_unlock(&slab->lock, key);

	return result;
}
147
/**
 * @brief Return a block to a memory slab.
 *
 * If the slab was exhausted and a thread is pending on it, the block is
 * handed directly to that thread (bypassing the free list) and a
 * reschedule is triggered. Otherwise the block is pushed back onto the
 * free list.
 *
 * @param slab Slab the block came from.
 * @param mem Address of the pointer to the block being freed.
 */
void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
{
	k_spinlock_key_t key = k_spin_lock(&slab->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, free, slab);
	/* an empty free list means a waiter may exist; only then is the
	 * wait queue consulted
	 */
	if (slab->free_list == NULL && IS_ENABLED(CONFIG_MULTITHREADING)) {
		struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);

		if (pending_thread != NULL) {
			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);

			/* hand the block straight to the waiter: the block
			 * address travels via the thread's swap_data, which
			 * k_mem_slab_alloc() reads after z_pend_curr()
			 * returns; num_used is unchanged since the block
			 * stays allocated.
			 */
			z_thread_return_value_set_with_data(pending_thread, 0, *mem);
			z_ready_thread(pending_thread);
			/* z_reschedule() consumes the lock/key — no explicit
			 * unlock on this path
			 */
			z_reschedule(&slab->lock, key);
			return;
		}
	}
	/* push the block onto the free list: write the old head into the
	 * block's first word, then make the block the new head
	 */
	**(char ***) mem = slab->free_list;
	slab->free_list = *(char **) mem;
	slab->num_used--;

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);

	k_spin_unlock(&slab->lock, key);
}
Peter Mitsis12440652022-07-06 10:52:15 -0400173
174int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats)
175{
176 if ((slab == NULL) || (stats == NULL)) {
177 return -EINVAL;
178 }
179
180 k_spinlock_key_t key = k_spin_lock(&slab->lock);
181
182 stats->allocated_bytes = slab->num_used * slab->block_size;
183 stats->free_bytes = (slab->num_blocks - slab->num_used) * slab->block_size;
184#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
185 stats->max_allocated_bytes = slab->max_used * slab->block_size;
186#else
187 stats->max_allocated_bytes = 0;
188#endif
189
190 k_spin_unlock(&slab->lock, key);
191
192 return 0;
193}
194
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
/**
 * @brief Reset the peak-utilization counter of a memory slab.
 *
 * Collapses the recorded high-water mark down to the current number of
 * blocks in use, taken under the slab's spinlock.
 *
 * @param slab Slab whose peak counter is reset.
 *
 * @retval 0 on success.
 * @retval -EINVAL if @p slab is NULL.
 */
int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab)
{
	if (slab == NULL) {
		return -EINVAL;
	}

	k_spinlock_key_t lock_key = k_spin_lock(&slab->lock);

	/* current usage becomes the new high-water mark */
	slab->max_used = slab->num_used;

	k_spin_unlock(&slab->lock, lock_key);

	return 0;
}
#endif