/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <init.h>

/* Initialize a k_heap at runtime over a caller-provided buffer: set up
 * the wait queue used by blocking allocations, then hand the memory
 * region to the underlying sys_heap allocator.
 */
void k_heap_init(struct k_heap *h, void *mem, size_t bytes)
{
	z_waitq_init(&h->wait_q);
	sys_heap_init(&h->heap, mem, bytes);
}
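/* A minimal usage sketch, assuming application code (app_heap and
 * heap_mem are hypothetical names, not part of this file): create a
 * heap at runtime over a caller-provided buffer.
 *
 *	static char heap_mem[1024];		// hypothetical backing buffer
 *	static struct k_heap app_heap;		// hypothetical heap object
 *
 *	k_heap_init(&app_heap, heap_mem, sizeof(heap_mem));
 */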
/* Boot-time hook: initialize every statically defined k_heap found in
 * the dedicated linker section, so such heaps are usable without an
 * explicit k_heap_init() call.
 */
static int statics_init(const struct device *unused)
{
	ARG_UNUSED(unused);
	Z_STRUCT_SECTION_FOREACH(k_heap, h) {
		k_heap_init(h, h->heap.init_mem, h->heap.init_bytes);
	}
	return 0;
}

SYS_INIT(statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
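/* A minimal usage sketch, assuming the K_HEAP_DEFINE() macro from
 * kernel.h: statically defined heaps land in the k_heap section walked
 * by statics_init() above, so they are ready at boot with no runtime
 * setup.
 *
 *	K_HEAP_DEFINE(app_heap, 2048);	// hypothetical 2 KiB heap
 */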
/* Allocate memory from the heap, blocking the calling thread for up to
 * the given timeout if memory is not immediately available. Returns
 * NULL on failure or timeout expiration.
 */
void *k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
	int64_t now, end = z_timeout_end_calc(timeout);
	void *ret = NULL;
	k_spinlock_key_t key = k_spin_lock(&h->lock);

	/* Blocking allocation is not allowed from interrupt context */
	__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");

	while (ret == NULL) {
		ret = sys_heap_alloc(&h->heap, bytes);

		/* Give up once the absolute deadline has passed */
		now = z_tick_get();
		if ((ret != NULL) || ((end - now) <= 0)) {
			break;
		}

		/* Sleep on the wait queue (atomically releasing the
		 * lock) until k_heap_free() wakes us, then retry with
		 * whatever time remains.
		 */
		(void) z_pend_curr(&h->lock, key, &h->wait_q,
				   K_TICKS(end - now));
		key = k_spin_lock(&h->lock);
	}

	k_spin_unlock(&h->lock, key);
	return ret;
}
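/* A minimal usage sketch (app_heap is a hypothetical heap defined by
 * the application): bound the wait so the caller can fall back when
 * memory stays exhausted past the deadline.
 *
 *	// Block up to 100 ms for another thread to free memory
 *	void *buf = k_heap_alloc(&app_heap, 64, K_MSEC(100));
 *
 *	if (buf == NULL) {
 *		// allocation failed and the timeout expired
 *	}
 */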
/* Return a block to the heap, then wake every thread blocked in
 * k_heap_alloc() so each can retry its allocation.
 */
void k_heap_free(struct k_heap *h, void *mem)
{
	k_spinlock_key_t key = k_spin_lock(&h->lock);

	sys_heap_free(&h->heap, mem);

	if (z_unpend_all(&h->wait_q) != 0) {
		/* At least one waiter was woken: let it run */
		z_reschedule(&h->lock, key);
	} else {
		k_spin_unlock(&h->lock, key);
	}
}
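/* A minimal usage sketch (app_heap and buf are hypothetical): freeing
 * a block may immediately satisfy another thread blocked in
 * k_heap_alloc(), since all waiters are woken to retry.
 *
 *	k_heap_free(&app_heap, buf);	// may unblock waiting allocators
 */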
#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
/* Compatibility layer for legacy k_mem_pool code on top of a k_heap
 * backend.
 */

int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
		     size_t size, k_timeout_t timeout)
{
	block->id.heap = p->heap;
	block->data = k_heap_alloc(p->heap, size, timeout);

	/* The legacy API returns -EAGAIN on timeout expiration, but
	 * -ENOMEM if the timeout was K_NO_WAIT. Don't ask.
	 */
	if (size != 0 && block->data == NULL) {
		return K_TIMEOUT_EQ(timeout, K_NO_WAIT) ? -ENOMEM : -EAGAIN;
	} else {
		return 0;
	}
}
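/* A minimal usage sketch (my_pool is a hypothetical legacy pool
 * object): the block-based API mapped onto the heap backend, paired
 * with k_mem_pool_free_id() below.
 *
 *	struct k_mem_block block;
 *
 *	if (k_mem_pool_alloc(&my_pool, &block, 64, K_NO_WAIT) == 0) {
 *		// ... use block.data ...
 *		k_mem_pool_free_id(&block.id);
 *	}
 */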
void k_mem_pool_free_id(struct k_mem_block_id *id)
{
	k_heap_free(id->heap, id->data);
}

#endif /* CONFIG_MEM_POOL_HEAP_BACKEND */