/*
 * Copyright (c) 2017 Linaro Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <init.h>
#include <kernel.h>
#include <kernel_structs.h>
#include <kernel_internal.h>
#include <sys/__assert.h>
#include <stdbool.h>
#include <spinlock.h>
#include <sys/libc-hooks.h>
#include <logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

struct k_spinlock z_mem_domain_lock;
static uint8_t max_partitions;

struct k_mem_domain k_mem_domain_default;

#if __ASSERT_ON
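/* Sanity-check a partition before it is added to a domain: it must be
 * non-NULL, have a nonzero size, not wrap around the address space,
 * not be simultaneously writable and executable (when
 * CONFIG_EXECUTE_XOR_WRITE is enabled), and not overlap any partition
 * already present in the domain. Only built when assertions are on.
 */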
static bool check_add_partition(struct k_mem_domain *domain,
				struct k_mem_partition *part)
{

	int i;
	uintptr_t pstart, pend, dstart, dend;

	if (part == NULL) {
		LOG_ERR("NULL k_mem_partition provided");
		return false;
	}

#ifdef CONFIG_EXECUTE_XOR_WRITE
	/* Arches where execution cannot be disabled should always return
	 * false to this check
	 */
	if (K_MEM_PARTITION_IS_EXECUTABLE(part->attr) &&
	    K_MEM_PARTITION_IS_WRITABLE(part->attr)) {
		LOG_ERR("partition is writable and executable <start %lx>",
			part->start);
		return false;
	}
#endif

	if (part->size == 0) {
		LOG_ERR("zero sized partition at %p with base 0x%lx",
			part, part->start);
		return false;
	}

	pstart = part->start;
	pend = part->start + part->size;

	if (pend <= pstart) {
		LOG_ERR("invalid partition %p, wraparound detected. base 0x%lx size %zu",
			part, part->start, part->size);
		return false;
	}

	/* Check that this partition doesn't overlap any existing ones already
	 * in the domain
	 */
	for (i = 0; i < domain->num_partitions; i++) {
		struct k_mem_partition *dpart = &domain->partitions[i];

		if (dpart->size == 0) {
			/* Unused slot */
			continue;
		}

		dstart = dpart->start;
		dend = dstart + dpart->size;

		if (pend > dstart && dend > pstart) {
			LOG_ERR("partition %p base %lx (size %zu) overlaps existing base %lx (size %zu)",
				part, part->start, part->size,
				dpart->start, dpart->size);
			return false;
		}
	}

	return true;
}
#endif

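/* Initialize a memory domain with an optional initial array of
 * partitions. Architecture-specific per-domain data is also set up
 * here; since that step may allocate memory, a failure results in a
 * kernel panic rather than an assertion.
 */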
void k_mem_domain_init(struct k_mem_domain *domain, uint8_t num_parts,
		       struct k_mem_partition *parts[])
{
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(domain != NULL);
	__ASSERT(num_parts == 0U || parts != NULL,
		 "parts array is NULL and num_parts is nonzero");
	__ASSERT(num_parts <= max_partitions,
		 "num_parts of %d exceeds maximum allowable partitions (%d)",
		 num_parts, max_partitions);

	key = k_spin_lock(&z_mem_domain_lock);

	domain->num_partitions = 0U;
	(void)memset(domain->partitions, 0, sizeof(domain->partitions));
	sys_dlist_init(&domain->mem_domain_q);

#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
	int ret = arch_mem_domain_init(domain);

	/* TODO propagate return values, see #24609.
	 *
	 * Not using an assertion here as this is a memory allocation error
	 */
	if (ret != 0) {
		LOG_ERR("architecture-specific initialization failed for domain %p with %d",
			domain, ret);
		k_panic();
	}
#endif
	if (num_parts != 0U) {
		uint32_t i;

		for (i = 0U; i < num_parts; i++) {
			__ASSERT(check_add_partition(domain, parts[i]),
				 "invalid partition index %d (%p)",
				 i, parts[i]);

			domain->partitions[i] = *parts[i];
			domain->num_partitions++;
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
			arch_mem_domain_partition_add(domain, i);
#endif
		}
	}

	k_spin_unlock(&z_mem_domain_lock, key);
}
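
/* Illustrative usage sketch (not compiled here): an application might
 * group a buffer into a partition, build a domain around it, and then
 * attach a user thread. The buffer, domain, and thread names below are
 * hypothetical, and the attribute macro and alignment constraints are
 * architecture-dependent assumptions (e.g. K_MEM_PARTITION_P_RW_U_RW
 * on MPU-based targets):
 *
 *	static uint8_t __aligned(32) app_buf[1024];
 *	K_MEM_PARTITION_DEFINE(app_part, app_buf, sizeof(app_buf),
 *			       K_MEM_PARTITION_P_RW_U_RW);
 *
 *	struct k_mem_partition *app_parts[] = { &app_part };
 *	struct k_mem_domain app_domain;
 *
 *	k_mem_domain_init(&app_domain, ARRAY_SIZE(app_parts), app_parts);
 *	k_mem_domain_add_thread(&app_domain, app_thread_id);
 */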
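/* Copy a partition into the first free slot of a domain. A slot with
 * size 0 is considered free; it is an assertion failure if no free
 * slot remains.
 */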
void k_mem_domain_add_partition(struct k_mem_domain *domain,
				struct k_mem_partition *part)
{
	int p_idx;
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(domain != NULL);
	__ASSERT(check_add_partition(domain, part),
		 "invalid partition %p", part);

	key = k_spin_lock(&z_mem_domain_lock);

	for (p_idx = 0; p_idx < max_partitions; p_idx++) {
		/* A zero-sized partition denotes it's a free partition */
		if (domain->partitions[p_idx].size == 0U) {
			break;
		}
	}

	__ASSERT(p_idx < max_partitions,
		 "no free partition slots available");

	LOG_DBG("add partition base %lx size %zu to domain %p\n",
		part->start, part->size, domain);

	domain->partitions[p_idx].start = part->start;
	domain->partitions[p_idx].size = part->size;
	domain->partitions[p_idx].attr = part->attr;

	domain->num_partitions++;

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
	arch_mem_domain_partition_add(domain, p_idx);
#endif
	k_spin_unlock(&z_mem_domain_lock, key);
}

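/* Remove the partition whose base address and size match 'part' from
 * a domain, marking its slot free by zeroing the size. It is an
 * assertion failure if no matching partition exists.
 */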
void k_mem_domain_remove_partition(struct k_mem_domain *domain,
				   struct k_mem_partition *part)
{
	int p_idx;
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(domain != NULL);
	__ASSERT_NO_MSG(part != NULL);

	key = k_spin_lock(&z_mem_domain_lock);

	/* find a partition that matches the given start and size */
	for (p_idx = 0; p_idx < max_partitions; p_idx++) {
		if (domain->partitions[p_idx].start == part->start &&
		    domain->partitions[p_idx].size == part->size) {
			break;
		}
	}

	__ASSERT(p_idx < max_partitions, "no matching partition found");

	LOG_DBG("remove partition base %lx size %zu from domain %p\n",
		part->start, part->size, domain);

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
	arch_mem_domain_partition_remove(domain, p_idx);
#endif

	/* A zero-sized partition denotes it's a free partition */
	domain->partitions[p_idx].size = 0U;

	domain->num_partitions--;

	k_spin_unlock(&z_mem_domain_lock, key);
}

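/* Append a thread to a domain's thread list and point the thread at
 * its new domain. Caller must hold z_mem_domain_lock.
 */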
static void add_thread_locked(struct k_mem_domain *domain,
			      k_tid_t thread)
{
	__ASSERT_NO_MSG(domain != NULL);
	__ASSERT_NO_MSG(thread != NULL);

	LOG_DBG("add thread %p to domain %p\n", thread, domain);
	sys_dlist_append(&domain->mem_domain_q,
			 &thread->mem_domain_info.mem_domain_q_node);
	thread->mem_domain_info.mem_domain = domain;

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
	arch_mem_domain_thread_add(thread);
#endif
}

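/* Detach a thread from its current domain's thread list. Caller must
 * hold z_mem_domain_lock.
 */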
static void remove_thread_locked(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread != NULL);
	LOG_DBG("remove thread %p from memory domain %p\n",
		thread, thread->mem_domain_info.mem_domain);
	sys_dlist_remove(&thread->mem_domain_info.mem_domain_q_node);

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
	arch_mem_domain_thread_remove(thread);
#endif
}

/* Called from thread object initialization */
void z_mem_domain_init_thread(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

	/* New threads inherit memory domain configuration from parent */
	add_thread_locked(_current->mem_domain_info.mem_domain, thread);
	k_spin_unlock(&z_mem_domain_lock, key);
}

/* Called when thread aborts during teardown tasks. sched_spinlock is held */
void z_mem_domain_exit_thread(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
	remove_thread_locked(thread);
	k_spin_unlock(&z_mem_domain_lock, key);
}

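/* Move a thread into a domain, removing it from whichever domain it
 * currently belongs to first. A no-op if the thread is already a
 * member of the given domain.
 */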
void k_mem_domain_add_thread(struct k_mem_domain *domain, k_tid_t thread)
{
	k_spinlock_key_t key;

	key = k_spin_lock(&z_mem_domain_lock);
	if (thread->mem_domain_info.mem_domain != domain) {
		remove_thread_locked(thread);
		add_thread_locked(domain, thread);
	}
	k_spin_unlock(&z_mem_domain_lock, key);
}

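/* Threads always belong to some domain; "removing" a thread simply
 * reassigns it to the default domain.
 */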
void k_mem_domain_remove_thread(k_tid_t thread)
{
	k_mem_domain_add_thread(&k_mem_domain_default, thread);
}

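/* Tear down a domain, migrating every member thread to the default
 * domain. The default domain itself may not be destroyed.
 */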
void k_mem_domain_destroy(struct k_mem_domain *domain)
{
	k_spinlock_key_t key;
	sys_dnode_t *node, *next_node;

	__ASSERT_NO_MSG(domain != NULL);
	__ASSERT(domain != &k_mem_domain_default,
		 "cannot destroy default domain");

	key = k_spin_lock(&z_mem_domain_lock);

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
	arch_mem_domain_destroy(domain);
#endif

	SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) {
		struct k_thread *thread =
			CONTAINER_OF(node, struct k_thread, mem_domain_info);

		remove_thread_locked(thread);
		add_thread_locked(&k_mem_domain_default, thread);
	}

	k_spin_unlock(&z_mem_domain_lock, key);
}

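/* PRE_KERNEL_1 init hook: query how many partitions the architecture
 * supports and set up the default memory domain, including the libc
 * partition when one exists.
 */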
static int init_mem_domain_module(const struct device *arg)
{
	ARG_UNUSED(arg);

	max_partitions = arch_mem_domain_max_partitions_get();
	/*
	 * max_partitions must be less than or equal to
	 * CONFIG_MAX_DOMAIN_PARTITIONS, otherwise we would index past
	 * the end of the partitions array.
	 */
	__ASSERT(max_partitions <= CONFIG_MAX_DOMAIN_PARTITIONS, "");

	k_mem_domain_init(&k_mem_domain_default, 0, NULL);
#ifdef Z_LIBC_PARTITION_EXISTS
	k_mem_domain_add_partition(&k_mem_domain_default, &z_libc_partition);
#endif /* Z_LIBC_PARTITION_EXISTS */

	return 0;
}

SYS_INIT(init_mem_domain_module, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);