blob: ccdd66907880c08d0486f6aabdfe5ecaa141418a [file] [log] [blame]
/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
6
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +02007#include <zephyr/kernel.h>
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +02008#include <zephyr/init.h>
9#include <zephyr/linker/linker-defs.h>
Gerard Marull-Paretasdacb3db2023-05-15 15:50:28 +020010#include <zephyr/sys/iterable_sections.h>
Anas Nashif8634c3b2023-08-29 17:03:12 +000011/* private kernel APIs */
12#include <ksched.h>
13#include <wait_q.h>
Andy Ross0dd83b82020-04-03 10:01:03 -070014
/* Initialize a k_heap at runtime: set up the wait queue that blocking
 * allocators pend on, then hand the backing memory region to the
 * low-level sys_heap allocator, and emit the object-init trace event.
 */
void k_heap_init(struct k_heap *heap, void *mem, size_t bytes)
{
	z_waitq_init(&heap->wait_q);
	sys_heap_init(&heap->heap, mem, bytes);

	SYS_PORT_TRACING_OBJ_INIT(k_heap, heap);
}
22
/* One-shot initializer for all statically defined k_heap objects placed
 * in the k_heap iterable section.  With demand paging and non-resident
 * sections, this routine is registered twice (see SYS_INIT_NAMED below)
 * and each pass initializes only the heaps it can safely touch.
 */
static int statics_init(void)
{
	STRUCT_SECTION_FOREACH(k_heap, heap) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		/* Some heaps may not present at boot, so we need to wait for
		 * paging mechanism to be initialized before we can initialize
		 * each heap.
		 */
		extern bool z_sys_post_kernel;
		bool do_clear = z_sys_post_kernel;

		/* During pre-kernel init, z_sys_post_kernel == false,
		 * initialize if within pinned region. Otherwise skip.
		 * In post-kernel init, z_sys_post_kernel == true, skip those in
		 * pinned region as they have already been initialized and
		 * possibly already in use. Otherwise initialize.
		 */
		if (lnkr_is_pinned((uint8_t *)heap) &&
		    lnkr_is_pinned((uint8_t *)&heap->wait_q) &&
		    lnkr_is_region_pinned((uint8_t *)heap->heap.init_mem,
					  heap->heap.init_bytes)) {
			do_clear = !do_clear;
		}

		/* Braceless on purpose: the `{ ... }` body below is shared
		 * with the non-demand-paging build, where it runs
		 * unconditionally for every heap in the section.
		 */
		if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
		{
			k_heap_init(heap, heap->heap.init_mem, heap->heap.init_bytes);
		}
	}
	return 0;
}
55
Jordan Yates6f41d522022-07-02 12:06:55 +100056SYS_INIT_NAMED(statics_init_pre, statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
Andy Ross0dd83b82020-04-03 10:01:03 -070057
Daniel Leung5c4fff32021-08-03 13:59:36 -070058#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
59/* Need to wait for paging mechanism to be initialized before
60 * heaps that are not in pinned sections can be initialized.
61 */
Jordan Yates6f41d522022-07-02 12:06:55 +100062SYS_INIT_NAMED(statics_init_post, statics_init, POST_KERNEL, 0);
Daniel Leung5c4fff32021-08-03 13:59:36 -070063#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
64
/* Allocate aligned memory from the heap, optionally blocking until
 * memory becomes available or @a timeout expires.  Returns a pointer to
 * the allocated block, or NULL on failure/timeout.  Must not block from
 * ISR context (asserted below).
 */
void *k_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t bytes,
			   k_timeout_t timeout)
{
	/* Capture the absolute deadline up front so that repeated waits
	 * below all draw from one shared timeout budget.
	 */
	k_timepoint_t end = sys_timepoint_calc(timeout);
	void *ret = NULL;

	k_spinlock_key_t key = k_spin_lock(&heap->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, aligned_alloc, heap, timeout);

	/* Blocking allocation is not permitted from an ISR */
	__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");

	bool blocked_alloc = false;

	while (ret == NULL) {
		ret = sys_heap_aligned_alloc(&heap->heap, align, bytes);

		/* Stop on success, when the caller asked not to wait, or
		 * in single-threaded builds where no other thread could
		 * ever free memory while we pend.
		 */
		if (!IS_ENABLED(CONFIG_MULTITHREADING) ||
		    (ret != NULL) || K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
			break;
		}

		/* Emit the "blocking" trace event only on the first
		 * failed attempt, not on every retry.
		 */
		if (!blocked_alloc) {
			blocked_alloc = true;

			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_heap, aligned_alloc, heap, timeout);
		} else {
			/**
			 * @todo Trace attempt to avoid empty trace segments
			 */
		}

		/* Recompute the remaining time, pend on the wait queue
		 * (releasing the lock) until k_heap_free() wakes us or
		 * the deadline passes, then re-take the lock and retry.
		 */
		timeout = sys_timepoint_timeout(end);
		(void) z_pend_curr(&heap->lock, key, &heap->wait_q, timeout);
		key = k_spin_lock(&heap->lock);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, aligned_alloc, heap, timeout, ret);

	k_spin_unlock(&heap->lock, key);
	return ret;
}
107
Anas Nashif477a04a2024-02-28 08:15:15 -0500108void *k_heap_alloc(struct k_heap *heap, size_t bytes, k_timeout_t timeout)
Torbjörn Leksell80cd9da2021-03-26 13:42:25 +0100109{
Anas Nashif477a04a2024-02-28 08:15:15 -0500110 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, alloc, heap, timeout);
Torbjörn Leksell80cd9da2021-03-26 13:42:25 +0100111
Anas Nashif477a04a2024-02-28 08:15:15 -0500112 void *ret = k_heap_aligned_alloc(heap, sizeof(void *), bytes, timeout);
Torbjörn Leksell80cd9da2021-03-26 13:42:25 +0100113
Anas Nashif477a04a2024-02-28 08:15:15 -0500114 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, alloc, heap, timeout, ret);
Torbjörn Leksell80cd9da2021-03-26 13:42:25 +0100115
116 return ret;
117}
118
/* Return @a mem to the heap.  If any threads are pended waiting for
 * memory, wake them all so they can retry their allocations.
 */
void k_heap_free(struct k_heap *heap, void *mem)
{
	k_spinlock_key_t key = k_spin_lock(&heap->lock);

	sys_heap_free(&heap->heap, mem);

	SYS_PORT_TRACING_OBJ_FUNC(k_heap, free, heap);
	/* z_unpend_all() returns the number of threads woken; only go
	 * through the reschedule path when someone was actually waiting,
	 * otherwise just drop the lock.
	 */
	if (IS_ENABLED(CONFIG_MULTITHREADING) && (z_unpend_all(&heap->wait_q) != 0)) {
		z_reschedule(&heap->lock, key);
	} else {
		k_spin_unlock(&heap->lock, key);
	}
}