/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <string.h>
#include <sys/math_extras.h>
#include <sys/util.h>

static void *z_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t size)
{
	void *mem;
	struct k_heap **heap_ref;
	size_t __align;

	/*
	 * Adjust the size to make room for our heap reference.
	 * Merge a rewind bit with align value (see sys_heap_aligned_alloc()).
	 * This allows for storing the heap pointer right below the aligned
	 * boundary without wasting any memory.
	 */
	if (size_add_overflow(size, sizeof(heap_ref), &size)) {
		return NULL;
	}
	__align = align | sizeof(heap_ref);

	mem = k_heap_aligned_alloc(heap, __align, size, K_NO_WAIT);
	if (mem == NULL) {
		return NULL;
	}

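	/*
	 * Illustrative layout produced by the stores below: the owning heap
	 * pointer occupies the sizeof(heap_ref) bytes immediately below the
	 * pointer handed back to the caller, and the returned pointer itself
	 * honors the requested alignment:
	 *
	 *	mem ...................... struct k_heap * (read back by k_free())
	 *	mem + sizeof(heap_ref) ... user memory, aligned, returned below
	 */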
	heap_ref = mem;
	*heap_ref = heap;
	mem = ++heap_ref;
	__ASSERT(align == 0 || ((uintptr_t)mem & (align - 1)) == 0,
		 "misaligned memory at %p (align = %zu)", mem, align);

	return mem;
}

void k_free(void *ptr)
{
	struct k_heap **heap_ref;

	if (ptr != NULL) {
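		/*
		 * z_heap_aligned_alloc() stored the owning heap pointer one
		 * slot below the user pointer; step back over it to recover
		 * both the heap and the real start of the allocation.
		 */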
		heap_ref = ptr;
		ptr = --heap_ref;

		SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_free, *heap_ref);

		k_heap_free(*heap_ref, ptr);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_free, *heap_ref);
	}
}

#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)

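/*
 * k_aligned_alloc(), k_malloc() and k_calloc() below are served from this
 * statically defined system heap; it is also what
 * k_thread_system_pool_assign() points a thread's resource pool at.
 */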
K_HEAP_DEFINE(_system_heap, CONFIG_HEAP_MEM_POOL_SIZE);
#define _SYSTEM_HEAP (&_system_heap)

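/*
 * Illustrative use of k_aligned_alloc() (hypothetical caller, not part of
 * this file): the alignment must be a power of two and a multiple of
 * sizeof(void *), as enforced by the assertions below:
 *
 *	void *buf = k_aligned_alloc(32, 100);
 *
 *	if (buf != NULL) {
 *		... use buf ...
 *		k_free(buf);
 *	}
 */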
void *k_aligned_alloc(size_t align, size_t size)
{
	__ASSERT(align / sizeof(void *) >= 1
		 && (align % sizeof(void *)) == 0,
		 "align must be a multiple of sizeof(void *)");

	__ASSERT((align & (align - 1)) == 0,
		 "align must be a power of 2");

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_aligned_alloc, _SYSTEM_HEAP);

	void *ret = z_heap_aligned_alloc(_SYSTEM_HEAP, align, size);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_aligned_alloc, _SYSTEM_HEAP, ret);

	return ret;
}

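/* k_malloc() delegates to k_aligned_alloc() with the minimum alignment of sizeof(void *). */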
void *k_malloc(size_t size)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_malloc, _SYSTEM_HEAP);

	void *ret = k_aligned_alloc(sizeof(void *), size);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_malloc, _SYSTEM_HEAP, ret);

	return ret;
}

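/*
 * Illustrative use of k_calloc() (hypothetical caller and struct name):
 * a zero-filled array allocation with overflow-checked sizing:
 *
 *	struct sample *buf = k_calloc(16, sizeof(struct sample));
 *
 *	if (buf != NULL) {
 *		... all 16 elements start zeroed ...
 *		k_free(buf);
 *	}
 */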
void *k_calloc(size_t nmemb, size_t size)
{
	void *ret;
	size_t bounds;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_calloc, _SYSTEM_HEAP);

	if (size_mul_overflow(nmemb, size, &bounds)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_calloc, _SYSTEM_HEAP, NULL);

		return NULL;
	}

	ret = k_malloc(bounds);
	if (ret != NULL) {
		(void)memset(ret, 0, bounds);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_calloc, _SYSTEM_HEAP, ret);

	return ret;
}

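/*
 * Point the thread's resource pool at the system heap so that allocations
 * made on its behalf (see z_thread_aligned_alloc() below) can succeed.
 */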
void k_thread_system_pool_assign(struct k_thread *thread)
{
	thread->resource_pool = _SYSTEM_HEAP;
}
#else
#define _SYSTEM_HEAP	NULL
#endif

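/*
 * Allocate on behalf of the calling context: ISRs fall back to the system
 * heap (NULL when there is none), while threads use whatever resource pool
 * they have been assigned; NULL is returned when no heap is available.
 */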
void *z_thread_aligned_alloc(size_t align, size_t size)
{
	void *ret;
	struct k_heap *heap;

	if (k_is_in_isr()) {
		heap = _SYSTEM_HEAP;
	} else {
		heap = _current->resource_pool;
	}

	if (heap != NULL) {
		ret = z_heap_aligned_alloc(heap, align, size);
	} else {
		ret = NULL;
	}

	return ret;
}