/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <init.h>

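/*
 * Initialize a k_heap: set up the wait queue used to pend blocking
 * allocators, then hand the backing memory region to the underlying
 * sys_heap allocator.
 */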
void k_heap_init(struct k_heap *h, void *mem, size_t bytes)
{
	z_waitq_init(&h->wait_q);
	sys_heap_init(&h->heap, mem, bytes);

	SYS_PORT_TRACING_OBJ_INIT(k_heap, h);
}

static int statics_init(const struct device *unused)
{
	ARG_UNUSED(unused);
	Z_STRUCT_SECTION_FOREACH(k_heap, h) {
		k_heap_init(h, h->heap.init_mem, h->heap.init_bytes);
	}
	return 0;
}

SYS_INIT(statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

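/*
 * Minimal usage sketch (illustrative only, not part of this file;
 * "my_heap" and the sizes are placeholder values):
 *
 *	K_HEAP_DEFINE(my_heap, 1024);
 *
 *	void *buf = k_heap_alloc(&my_heap, 64, K_MSEC(100));
 *	if (buf != NULL) {
 *		k_heap_free(&my_heap, buf);
 *	}
 */
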
void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
			   k_timeout_t timeout)
{
	int64_t now, end = sys_clock_timeout_end_calc(timeout);
	void *ret = NULL;
	k_spinlock_key_t key = k_spin_lock(&h->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, aligned_alloc, h, timeout);

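	/* Blocking (non-K_NO_WAIT) allocation is not allowed from ISRs */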
	__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");

	bool blocked_alloc = false;

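	/*
	 * Retry until the allocation succeeds or the timeout expires.
	 * On failure with time remaining, pend on the heap's wait queue;
	 * k_heap_free() wakes waiters so they can try again.
	 */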
	while (ret == NULL) {
		ret = sys_heap_aligned_alloc(&h->heap, align, bytes);

		now = sys_clock_tick_get();
		if (!IS_ENABLED(CONFIG_MULTITHREADING) ||
		    (ret != NULL) || ((end - now) <= 0)) {
			break;
		}

		if (!blocked_alloc) {
			blocked_alloc = true;

			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_heap, aligned_alloc, h, timeout);
		} else {
			/**
			 * @todo	Trace attempt to avoid empty trace segments
			 */
		}

		(void) z_pend_curr(&h->lock, key, &h->wait_q,
				   K_TICKS(end - now));
		key = k_spin_lock(&h->lock);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, aligned_alloc, h, timeout, ret);

	k_spin_unlock(&h->lock, key);
	return ret;
}

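/*
 * Convenience wrapper: request pointer alignment (sizeof(void *)) from
 * k_heap_aligned_alloc().
 */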
void *k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, alloc, h, timeout);

	void *ret = k_heap_aligned_alloc(h, sizeof(void *), bytes, timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, alloc, h, timeout, ret);

	return ret;
}

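/*
 * Return a block to the heap and, when multithreading is enabled, wake
 * all threads pending on the wait queue so they can retry their
 * allocations.
 */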
void k_heap_free(struct k_heap *h, void *mem)
{
	k_spinlock_key_t key = k_spin_lock(&h->lock);

	sys_heap_free(&h->heap, mem);

	SYS_PORT_TRACING_OBJ_FUNC(k_heap, free, h);
	if (IS_ENABLED(CONFIG_MULTITHREADING) && z_unpend_all(&h->wait_q) != 0) {
		z_reschedule(&h->lock, key);
	} else {
		k_spin_unlock(&h->lock, key);
	}
}