/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/sys/iterable_sections.h>
/* private kernel APIs */
#include <ksched.h>
#include <wait_q.h>

void k_heap_init(struct k_heap *heap, void *mem, size_t bytes)
{
	z_waitq_init(&heap->wait_q);
	heap->lock = (struct k_spinlock) {};
	sys_heap_init(&heap->heap, mem, bytes);

	SYS_PORT_TRACING_OBJ_INIT(k_heap, heap);
}
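
/*
 * Minimal usage sketch (illustrative only, not part of the kernel source):
 * a heap can be initialized at runtime over a caller-provided buffer with
 * k_heap_init(), or declared statically with K_HEAP_DEFINE(). The buffer
 * and heap names and the sizes below are assumptions made for the example.
 *
 *   static char heap_mem[1024];
 *   static struct k_heap app_heap;
 *
 *   k_heap_init(&app_heap, heap_mem, sizeof(heap_mem));
 *
 *   K_HEAP_DEFINE(other_heap, 2048);
 */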

static int statics_init(void)
{
	STRUCT_SECTION_FOREACH(k_heap, heap) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		/* Some heaps may not be present at boot, so we need to wait
		 * for the paging mechanism to be initialized before we can
		 * initialize each heap.
		 */
		extern bool z_sys_post_kernel;
		bool do_clear = z_sys_post_kernel;

		/* During pre-kernel init, z_sys_post_kernel == false:
		 * initialize the heap if it is within the pinned region,
		 * otherwise skip it. During post-kernel init,
		 * z_sys_post_kernel == true: skip heaps in the pinned region,
		 * as they have already been initialized and are possibly
		 * already in use; otherwise initialize them.
		 */
		if (lnkr_is_pinned((uint8_t *)heap) &&
		    lnkr_is_pinned((uint8_t *)&heap->wait_q) &&
		    lnkr_is_region_pinned((uint8_t *)heap->heap.init_mem,
					  heap->heap.init_bytes)) {
			do_clear = !do_clear;
		}

		if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
		{
			k_heap_init(heap, heap->heap.init_mem, heap->heap.init_bytes);
		}
	}
	return 0;
}

SYS_INIT_NAMED(statics_init_pre, statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* Need to wait for the paging mechanism to be initialized before
 * heaps that are not in pinned sections can be initialized.
 */
SYS_INIT_NAMED(statics_init_post, statics_init, POST_KERNEL, 0);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

void *k_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t bytes,
			   k_timeout_t timeout)
{
	k_timepoint_t end = sys_timepoint_calc(timeout);
	void *ret = NULL;

	k_spinlock_key_t key = k_spin_lock(&heap->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, aligned_alloc, heap, timeout);

	__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");

	bool blocked_alloc = false;

	while (ret == NULL) {
		ret = sys_heap_aligned_alloc(&heap->heap, align, bytes);

		if (!IS_ENABLED(CONFIG_MULTITHREADING) ||
		    (ret != NULL) || K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
			break;
		}

		if (!blocked_alloc) {
			blocked_alloc = true;

			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_heap, aligned_alloc, heap, timeout);
		} else {
			/**
			 * @todo	Trace attempt to avoid empty trace segments
			 */
		}

		timeout = sys_timepoint_timeout(end);
		(void) z_pend_curr(&heap->lock, key, &heap->wait_q, timeout);
		key = k_spin_lock(&heap->lock);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, aligned_alloc, heap, timeout, ret);

	k_spin_unlock(&heap->lock, key);
	return ret;
}
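
/*
 * Usage sketch (illustrative assumption, not kernel code): request storage
 * with a stricter alignment than the default pointer alignment, waiting up
 * to 100 ms for memory to become available. "app_heap" is a hypothetical
 * heap; the alignment must be a power of two, and a non-zero timeout is
 * only valid from thread context (see the __ASSERT above).
 *
 *   void *buf = k_heap_aligned_alloc(&app_heap, 32, 256, K_MSEC(100));
 *   if (buf == NULL) {
 *           // allocation did not succeed within the timeout
 *   }
 */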

void *k_heap_alloc(struct k_heap *heap, size_t bytes, k_timeout_t timeout)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, alloc, heap, timeout);

	void *ret = k_heap_aligned_alloc(heap, sizeof(void *), bytes, timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, alloc, heap, timeout, ret);

	return ret;
}
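
/*
 * Usage sketch (illustrative, names are assumptions): k_heap_alloc() behaves
 * like malloc() against a specific heap. With K_NO_WAIT it fails immediately
 * when the heap is exhausted; with K_FOREVER it blocks until k_heap_free()
 * releases enough memory to satisfy the request.
 *
 *   struct msg *m = k_heap_alloc(&app_heap, sizeof(struct msg), K_NO_WAIT);
 *   if (m != NULL) {
 *           // ... use the buffer ...
 *           k_heap_free(&app_heap, m);
 *   }
 */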

void *k_heap_calloc(struct k_heap *heap, size_t num, size_t size, k_timeout_t timeout)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, calloc, heap, timeout);

	void *ret = NULL;
	size_t bounds = 0U;

	if (!size_mul_overflow(num, size, &bounds)) {
		ret = k_heap_alloc(heap, bounds, timeout);
	}
	if (ret != NULL) {
		(void)memset(ret, 0, bounds);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, calloc, heap, timeout, ret);

	return ret;
}
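
/*
 * Usage sketch (illustrative): k_heap_calloc() guards the num * size
 * multiplication with size_mul_overflow() and zero-fills the block on
 * success, mirroring calloc(). "app_heap" and the element count below are
 * assumptions made for the example.
 *
 *   uint32_t *table = k_heap_calloc(&app_heap, 16, sizeof(uint32_t), K_NO_WAIT);
 */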

void *k_heap_realloc(struct k_heap *heap, void *ptr, size_t bytes, k_timeout_t timeout)
{
	k_timepoint_t end = sys_timepoint_calc(timeout);
	void *ret = NULL;

	k_spinlock_key_t key = k_spin_lock(&heap->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, realloc, heap, ptr, bytes, timeout);

	__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");

	while (ret == NULL) {
		ret = sys_heap_aligned_realloc(&heap->heap, ptr, sizeof(void *), bytes);

		if (!IS_ENABLED(CONFIG_MULTITHREADING) ||
		    (ret != NULL) || K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
			break;
		}

		timeout = sys_timepoint_timeout(end);
		(void) z_pend_curr(&heap->lock, key, &heap->wait_q, timeout);
		key = k_spin_lock(&heap->lock);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, realloc, heap, ptr, bytes, timeout, ret);

	k_spin_unlock(&heap->lock, key);
	return ret;
}
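
/*
 * Usage sketch (illustrative, reusing the hypothetical "m" buffer from the
 * k_heap_alloc() example): grow a previously allocated block. As with
 * realloc(), a NULL return is expected to leave the original block untouched,
 * so assign through a temporary before overwriting the old pointer.
 *
 *   void *bigger = k_heap_realloc(&app_heap, m, 2 * sizeof(struct msg), K_NO_WAIT);
 *   if (bigger != NULL) {
 *           m = bigger;
 *   }
 */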

void k_heap_free(struct k_heap *heap, void *mem)
{
	k_spinlock_key_t key = k_spin_lock(&heap->lock);

	sys_heap_free(&heap->heap, mem);

	SYS_PORT_TRACING_OBJ_FUNC(k_heap, free, heap);
	if (IS_ENABLED(CONFIG_MULTITHREADING) && (z_unpend_all(&heap->wait_q) != 0)) {
		z_reschedule(&heap->lock, key);
	} else {
		k_spin_unlock(&heap->lock, key);
	}
}