Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2023 Intel Corporation |
| 3 | * |
| 4 | * SPDX-License-Identifier: Apache-2.0 |
| 5 | * |
| 6 | */ |
| 7 | |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 8 | #include <zephyr/sys/util.h> |
| 9 | #include <zephyr/llext/elf.h> |
| 10 | #include <zephyr/llext/loader.h> |
| 11 | #include <zephyr/llext/llext.h> |
| 12 | #include <zephyr/kernel.h> |
Luca Burelli | 1732177 | 2023-12-01 18:43:40 +0100 | [diff] [blame] | 13 | #include <zephyr/cache.h> |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 14 | |
| 15 | #include <zephyr/logging/log.h> |
| 16 | LOG_MODULE_REGISTER(llext, CONFIG_LLEXT_LOG_LEVEL); |
| 17 | |
| 18 | #include <string.h> |
| 19 | |
Luca Burelli | 9c5412f | 2024-06-04 10:32:36 +0200 | [diff] [blame] | 20 | #include "llext_priv.h" |
Tom Burdick | 84e883b | 2024-01-19 11:03:46 -0600 | [diff] [blame] | 21 | |
/* Global list of all loaded extensions */
static sys_slist_t _llext_list = SYS_SLIST_STATIC_INIT(&_llext_list);

/* Serializes access to _llext_list and to per-extension use counts */
static struct k_mutex llext_lock = Z_MUTEX_INITIALIZER(llext_lock);
| 25 | |
Guennadi Liakhovetski | 2ccf775 | 2024-03-27 14:43:31 +0100 | [diff] [blame] | 26 | ssize_t llext_find_section(struct llext_loader *ldr, const char *search_name) |
| 27 | { |
Luca Burelli | d4ea1da | 2024-06-19 17:25:49 +0200 | [diff] [blame] | 28 | /* Note that this API is used after llext_load(), so the ldr->sect_hdrs |
| 29 | * cache is already freed. A direct search covers all situations. |
| 30 | */ |
Guennadi Liakhovetski | 2ccf775 | 2024-03-27 14:43:31 +0100 | [diff] [blame] | 31 | |
Luca Burelli | d4ea1da | 2024-06-19 17:25:49 +0200 | [diff] [blame] | 32 | elf_shdr_t *shdr; |
| 33 | unsigned int i; |
| 34 | size_t pos; |
| 35 | |
| 36 | for (i = 0, pos = ldr->hdr.e_shoff; |
| 37 | i < ldr->hdr.e_shnum; |
| 38 | i++, pos += ldr->hdr.e_shentsize) { |
| 39 | shdr = llext_peek(ldr, pos); |
| 40 | if (!shdr) { |
| 41 | /* The peek() method isn't supported */ |
| 42 | return -ENOTSUP; |
| 43 | } |
| 44 | |
| 45 | const char *name = llext_peek(ldr, |
| 46 | ldr->sects[LLEXT_MEM_SHSTRTAB].sh_offset + |
| 47 | shdr->sh_name); |
| 48 | |
| 49 | if (!strcmp(name, search_name)) { |
| 50 | return shdr->sh_offset; |
| 51 | } |
| 52 | } |
| 53 | |
| 54 | return -ENOENT; |
Guennadi Liakhovetski | f98b8bb | 2023-09-22 16:15:18 +0200 | [diff] [blame] | 55 | } |
| 56 | |
Guennadi Liakhovetski | 4e3e9a6 | 2023-11-20 15:45:13 +0100 | [diff] [blame] | 57 | /* |
| 58 | * Note, that while we protect the global llext list while searching, we release |
| 59 | * the lock before returning the found extension to the caller. Therefore it's |
| 60 | * a responsibility of the caller to protect against races with a freeing |
| 61 | * context when calling this function. |
| 62 | */ |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 63 | struct llext *llext_by_name(const char *name) |
| 64 | { |
Guennadi Liakhovetski | 4e3e9a6 | 2023-11-20 15:45:13 +0100 | [diff] [blame] | 65 | k_mutex_lock(&llext_lock, K_FOREVER); |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 66 | |
Guennadi Liakhovetski | 4e3e9a6 | 2023-11-20 15:45:13 +0100 | [diff] [blame] | 67 | for (sys_snode_t *node = sys_slist_peek_head(&_llext_list); |
| 68 | node != NULL; |
| 69 | node = sys_slist_peek_next(node)) { |
| 70 | struct llext *ext = CONTAINER_OF(node, struct llext, _llext_list); |
| 71 | |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 72 | if (strncmp(ext->name, name, sizeof(ext->name)) == 0) { |
Guennadi Liakhovetski | 4e3e9a6 | 2023-11-20 15:45:13 +0100 | [diff] [blame] | 73 | k_mutex_unlock(&llext_lock); |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 74 | return ext; |
| 75 | } |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 76 | } |
| 77 | |
Guennadi Liakhovetski | 4e3e9a6 | 2023-11-20 15:45:13 +0100 | [diff] [blame] | 78 | k_mutex_unlock(&llext_lock); |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 79 | return NULL; |
| 80 | } |
| 81 | |
Guennadi Liakhovetski | ee4540c | 2023-11-21 17:22:40 +0100 | [diff] [blame] | 82 | int llext_iterate(int (*fn)(struct llext *ext, void *arg), void *arg) |
| 83 | { |
| 84 | sys_snode_t *node; |
Guennadi Liakhovetski | ee4540c | 2023-11-21 17:22:40 +0100 | [diff] [blame] | 85 | int ret = 0; |
| 86 | |
Guennadi Liakhovetski | 4e3e9a6 | 2023-11-20 15:45:13 +0100 | [diff] [blame] | 87 | k_mutex_lock(&llext_lock, K_FOREVER); |
| 88 | |
Guennadi Liakhovetski | 92a7c77 | 2024-07-05 09:06:10 +0200 | [diff] [blame] | 89 | for (node = sys_slist_peek_head(&_llext_list); |
Guennadi Liakhovetski | ee4540c | 2023-11-21 17:22:40 +0100 | [diff] [blame] | 90 | node; |
Guennadi Liakhovetski | 92a7c77 | 2024-07-05 09:06:10 +0200 | [diff] [blame] | 91 | node = sys_slist_peek_next(node)) { |
Guennadi Liakhovetski | ee4540c | 2023-11-21 17:22:40 +0100 | [diff] [blame] | 92 | struct llext *ext = CONTAINER_OF(node, struct llext, _llext_list); |
| 93 | |
| 94 | ret = fn(ext, arg); |
| 95 | if (ret) { |
| 96 | break; |
| 97 | } |
| 98 | } |
| 99 | |
Guennadi Liakhovetski | 4e3e9a6 | 2023-11-20 15:45:13 +0100 | [diff] [blame] | 100 | k_mutex_unlock(&llext_lock); |
Guennadi Liakhovetski | ee4540c | 2023-11-21 17:22:40 +0100 | [diff] [blame] | 101 | return ret; |
| 102 | } |
| 103 | |
Marc Herbert | 01a8202 | 2024-04-11 21:38:13 +0000 | [diff] [blame] | 104 | const void *llext_find_sym(const struct llext_symtable *sym_table, const char *sym_name) |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 105 | { |
| 106 | if (sym_table == NULL) { |
Guennadi Liakhovetski | b9bdae8 | 2023-11-09 14:28:47 +0100 | [diff] [blame] | 107 | /* Built-in symbol table */ |
Mathieu Choplain | 8aa6ae4 | 2024-05-06 13:03:26 +0200 | [diff] [blame] | 108 | #ifdef CONFIG_LLEXT_EXPORT_BUILTINS_BY_SLID |
| 109 | /* 'sym_name' is actually a SLID to search for */ |
| 110 | uintptr_t slid = (uintptr_t)sym_name; |
| 111 | |
| 112 | /* TODO: perform a binary search instead of linear. |
| 113 | * Note that - as of writing - the llext_const_symbol_area |
| 114 | * section is sorted in ascending SLID order. |
| 115 | * (see scripts/build/llext_prepare_exptab.py) |
| 116 | */ |
| 117 | STRUCT_SECTION_FOREACH(llext_const_symbol, sym) { |
| 118 | if (slid == sym->slid) { |
| 119 | return sym->addr; |
| 120 | } |
| 121 | } |
| 122 | #else |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 123 | STRUCT_SECTION_FOREACH(llext_const_symbol, sym) { |
| 124 | if (strcmp(sym->name, sym_name) == 0) { |
| 125 | return sym->addr; |
| 126 | } |
| 127 | } |
Mathieu Choplain | 8aa6ae4 | 2024-05-06 13:03:26 +0200 | [diff] [blame] | 128 | #endif |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 129 | } else { |
| 130 | /* find symbols in module */ |
| 131 | for (size_t i = 0; i < sym_table->sym_cnt; i++) { |
| 132 | if (strcmp(sym_table->syms[i].name, sym_name) == 0) { |
| 133 | return sym_table->syms[i].addr; |
| 134 | } |
| 135 | } |
| 136 | } |
| 137 | |
| 138 | return NULL; |
| 139 | } |
| 140 | |
Guennadi Liakhovetski | b0b4b0b | 2023-09-26 13:02:25 +0200 | [diff] [blame] | 141 | int llext_load(struct llext_loader *ldr, const char *name, struct llext **ext, |
Guennadi Liakhovetski | 5332393 | 2024-08-29 12:47:25 +0200 | [diff] [blame] | 142 | const struct llext_load_param *ldr_parm) |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 143 | { |
Guennadi Liakhovetski | ade72c2 | 2023-09-27 17:30:59 +0200 | [diff] [blame] | 144 | int ret; |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 145 | |
Luca Burelli | cbed9fd | 2023-12-01 18:25:18 +0100 | [diff] [blame] | 146 | *ext = llext_by_name(name); |
| 147 | |
Guennadi Liakhovetski | a2acd7b | 2023-11-03 14:16:16 +0100 | [diff] [blame] | 148 | k_mutex_lock(&llext_lock, K_FOREVER); |
| 149 | |
| 150 | if (*ext) { |
| 151 | /* The use count is at least 1 */ |
| 152 | ret = (*ext)->use_count++; |
Guennadi Liakhovetski | 4e3e9a6 | 2023-11-20 15:45:13 +0100 | [diff] [blame] | 153 | goto out; |
Guennadi Liakhovetski | a2acd7b | 2023-11-03 14:16:16 +0100 | [diff] [blame] | 154 | } |
| 155 | |
Luca Burelli | 35ef089 | 2024-05-21 12:23:38 +0200 | [diff] [blame] | 156 | *ext = llext_alloc(sizeof(struct llext)); |
| 157 | if (*ext == NULL) { |
| 158 | LOG_ERR("Not enough memory for extension metadata"); |
| 159 | ret = -ENOMEM; |
| 160 | goto out; |
| 161 | } |
Luca Burelli | 35ef089 | 2024-05-21 12:23:38 +0200 | [diff] [blame] | 162 | |
| 163 | ret = do_llext_load(ldr, *ext, ldr_parm); |
| 164 | if (ret < 0) { |
| 165 | llext_free(*ext); |
| 166 | *ext = NULL; |
Guennadi Liakhovetski | 4e3e9a6 | 2023-11-20 15:45:13 +0100 | [diff] [blame] | 167 | goto out; |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 168 | } |
| 169 | |
Luca Burelli | 35ef089 | 2024-05-21 12:23:38 +0200 | [diff] [blame] | 170 | strncpy((*ext)->name, name, sizeof((*ext)->name)); |
| 171 | (*ext)->name[sizeof((*ext)->name) - 1] = '\0'; |
| 172 | (*ext)->use_count++; |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 173 | |
Luca Burelli | 35ef089 | 2024-05-21 12:23:38 +0200 | [diff] [blame] | 174 | sys_slist_append(&_llext_list, &(*ext)->_llext_list); |
| 175 | LOG_INF("Loaded extension %s", (*ext)->name); |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 176 | |
Guennadi Liakhovetski | 4e3e9a6 | 2023-11-20 15:45:13 +0100 | [diff] [blame] | 177 | out: |
| 178 | k_mutex_unlock(&llext_lock); |
| 179 | return ret; |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 180 | } |
| 181 | |
Tom Burdick | 9dffac0 | 2024-08-27 18:13:00 +0300 | [diff] [blame] | 182 | #include <zephyr/logging/log_ctrl.h> |
| 183 | |
/*
 * Flush any pending deferred-log messages before extension memory that they
 * may reference is freed. With deferred logging the logger runs in its own
 * thread, so we temporarily arrange for it to have higher priority than the
 * current thread, trigger it, and yield; priorities are restored afterwards.
 * A no-op unless CONFIG_LOG_MODE_DEFERRED is enabled.
 */
static void llext_log_flush(void)
{
#ifdef CONFIG_LOG_MODE_DEFERRED
	extern struct k_thread logging_thread;
	int cur_prio = k_thread_priority_get(k_current_get());
	int log_prio = k_thread_priority_get(&logging_thread);
	int target_prio;
	bool adjust_cur, adjust_log;

	/*
	 * Our goal is to raise the logger thread priority above current, but if
	 * current has the highest possible priority, both need to be adjusted,
	 * particularly if the logger thread has the lowest possible priority
	 */
	if (log_prio < cur_prio) {
		/* Logger already runs above us: nothing to adjust */
		adjust_cur = false;
		adjust_log = false;
		target_prio = 0;
	} else if (cur_prio == K_HIGHEST_THREAD_PRIO) {
		/* Cannot raise the logger above us: lower ourselves instead */
		adjust_cur = true;
		adjust_log = true;
		target_prio = cur_prio;
		k_thread_priority_set(k_current_get(), cur_prio + 1);
	} else {
		adjust_cur = false;
		adjust_log = true;
		target_prio = cur_prio - 1;
	}

	/* adjust logging thread priority if needed */
	if (adjust_log) {
		k_thread_priority_set(&logging_thread, target_prio);
	}

	log_thread_trigger();
	k_yield();

	if (adjust_log) {
		k_thread_priority_set(&logging_thread, log_prio);
	}
	if (adjust_cur) {
		/* Restore *our* priority, lowered above. The previous code
		 * mistakenly reset the logging thread here, which clobbered
		 * the log_prio restore just performed.
		 */
		k_thread_priority_set(k_current_get(), cur_prio);
	}
#endif
}
| 229 | |
/*
 * Drop one reference to the extension. When the use count reaches zero the
 * extension is removed from the global list and all of its memory is freed,
 * and *ext is set to NULL. Returns the remaining use count (0 means the
 * extension was actually unloaded).
 */
int llext_unload(struct llext **ext)
{
	__ASSERT(*ext, "Expected non-null extension");
	struct llext *tmp = *ext;

	k_mutex_lock(&llext_lock, K_FOREVER);

	/* Flush deferred log messages that may still reference extension
	 * memory before that memory is freed below.
	 */
	llext_log_flush();

	__ASSERT(tmp->use_count, "A valid LLEXT cannot have a zero use-count!");

	if (tmp->use_count-- != 1) {
		/* Still referenced elsewhere: only report the new count */
		unsigned int ret = tmp->use_count;

		k_mutex_unlock(&llext_lock);
		return ret;
	}

	/* FIXME: protect the global list
	 * NOTE(review): llext_lock is held across this removal, so the list
	 * looks already protected here -- confirm and drop the FIXME.
	 */
	sys_slist_find_and_remove(&_llext_list, &tmp->_llext_list);

	llext_dependency_remove_all(tmp);

	*ext = NULL;
	k_mutex_unlock(&llext_lock);

	/* Freeing happens outside the lock: the extension is no longer
	 * reachable through the global list at this point.
	 */
	llext_free_regions(tmp);
	llext_free(tmp->sym_tab.syms);
	llext_free(tmp->exp_tab.syms);
	llext_free(tmp);

	return 0;
}
| 263 | |
| 264 | int llext_call_fn(struct llext *ext, const char *sym_name) |
| 265 | { |
| 266 | void (*fn)(void); |
| 267 | |
Guennadi Liakhovetski | b9bdae8 | 2023-11-09 14:28:47 +0100 | [diff] [blame] | 268 | fn = llext_find_sym(&ext->exp_tab, sym_name); |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 269 | if (fn == NULL) { |
Luca Burelli | 2e085ba | 2024-06-28 14:35:15 +0200 | [diff] [blame] | 270 | return -ENOENT; |
Tom Burdick | 41e0a4a | 2023-09-27 08:10:10 -0500 | [diff] [blame] | 271 | } |
| 272 | fn(); |
| 273 | |
| 274 | return 0; |
| 275 | } |
Luca Burelli | af302cd | 2024-08-22 15:52:29 +0200 | [diff] [blame] | 276 | |
| 277 | static int call_fn_table(struct llext *ext, bool is_init) |
| 278 | { |
| 279 | ssize_t ret; |
| 280 | |
| 281 | ret = llext_get_fn_table(ext, is_init, NULL, 0); |
| 282 | if (ret < 0) { |
| 283 | LOG_ERR("Failed to get table size: %d", (int)ret); |
| 284 | return ret; |
| 285 | } |
| 286 | |
| 287 | typedef void (*elf_void_fn_t)(void); |
| 288 | |
| 289 | int fn_count = ret / sizeof(elf_void_fn_t); |
| 290 | elf_void_fn_t fn_table[fn_count]; |
| 291 | |
| 292 | ret = llext_get_fn_table(ext, is_init, &fn_table, sizeof(fn_table)); |
| 293 | if (ret < 0) { |
| 294 | LOG_ERR("Failed to get function table: %d", (int)ret); |
| 295 | return ret; |
| 296 | } |
| 297 | |
| 298 | for (int i = 0; i < fn_count; i++) { |
| 299 | LOG_DBG("calling %s function %p()", |
Luca Burelli | e6b32ab | 2024-09-11 09:23:28 +0200 | [diff] [blame] | 300 | is_init ? "bringup" : "teardown", (void *)fn_table[i]); |
Luca Burelli | af302cd | 2024-08-22 15:52:29 +0200 | [diff] [blame] | 301 | fn_table[i](); |
| 302 | } |
| 303 | |
| 304 | return 0; |
| 305 | } |
| 306 | |
/* Run the extension's init (constructor) function table.
 * NOTE(review): plain 'inline' at file scope presumably pairs with an extern
 * declaration in the public header to emit an external definition -- confirm.
 */
inline int llext_bringup(struct llext *ext)
{
	return call_fn_table(ext, true);
}

/* Run the extension's de-init (destructor) function table */
inline int llext_teardown(struct llext *ext)
{
	return call_fn_table(ext, false);
}
| 316 | |
| 317 | void llext_bootstrap(struct llext *ext, llext_entry_fn_t entry_fn, void *user_data) |
| 318 | { |
| 319 | int ret; |
| 320 | |
| 321 | /* Call initialization functions */ |
| 322 | ret = llext_bringup(ext); |
| 323 | if (ret < 0) { |
| 324 | LOG_ERR("Failed to call init functions: %d", ret); |
| 325 | return; |
| 326 | } |
| 327 | |
| 328 | /* Start extension main function */ |
Luca Burelli | e6b32ab | 2024-09-11 09:23:28 +0200 | [diff] [blame] | 329 | LOG_DBG("calling entry function %p(%p)", (void *)entry_fn, user_data); |
Luca Burelli | af302cd | 2024-08-22 15:52:29 +0200 | [diff] [blame] | 330 | entry_fn(user_data); |
| 331 | |
| 332 | /* Call de-initialization functions */ |
| 333 | ret = llext_teardown(ext); |
| 334 | if (ret < 0) { |
| 335 | LOG_ERR("Failed to call de-init functions: %d", ret); |
| 336 | return; |
| 337 | } |
| 338 | } |