/*
 * Copyright (c) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 */

#include <zephyr/sys/util.h>
#include <zephyr/llext/elf.h>
#include <zephyr/llext/loader.h>
#include <zephyr/llext/llext.h>
#include <zephyr/kernel.h>
#include <zephyr/cache.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(llext, CONFIG_LLEXT_LOG_LEVEL);

#include <string.h>

#include "llext_priv.h"

static sys_slist_t _llext_list = SYS_SLIST_STATIC_INIT(&_llext_list);

static struct k_mutex llext_lock = Z_MUTEX_INITIALIZER(llext_lock);

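/*
 * Locate a named section in the extension image and return its file offset.
 * This requires a loader that implements the peek() method; -ENOTSUP is
 * returned otherwise, and -ENOENT if no section with that name exists.
 */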
ssize_t llext_find_section(struct llext_loader *ldr, const char *search_name)
{
	/* Note that this API is used after llext_load(), so the ldr->sect_hdrs
	 * cache is already freed. A direct search covers all situations.
	 */

	elf_shdr_t *shdr;
	unsigned int i;
	size_t pos;

	for (i = 0, pos = ldr->hdr.e_shoff;
	     i < ldr->hdr.e_shnum;
	     i++, pos += ldr->hdr.e_shentsize) {
		shdr = llext_peek(ldr, pos);
		if (!shdr) {
			/* The peek() method isn't supported */
			return -ENOTSUP;
		}

		const char *name = llext_peek(ldr,
					      ldr->sects[LLEXT_MEM_SHSTRTAB].sh_offset +
					      shdr->sh_name);

		if (!strcmp(name, search_name)) {
			return shdr->sh_offset;
		}
	}

	return -ENOENT;
}

/*
 * Note that while we protect the global llext list during the search, we
 * release the lock before returning the found extension to the caller. It is
 * therefore the caller's responsibility to protect against races with a
 * freeing context when calling this function.
 */
struct llext *llext_by_name(const char *name)
{
	k_mutex_lock(&llext_lock, K_FOREVER);

	for (sys_snode_t *node = sys_slist_peek_head(&_llext_list);
	     node != NULL;
	     node = sys_slist_peek_next(node)) {
		struct llext *ext = CONTAINER_OF(node, struct llext, _llext_list);

		if (strncmp(ext->name, name, sizeof(ext->name)) == 0) {
			k_mutex_unlock(&llext_lock);
			return ext;
		}
	}

	k_mutex_unlock(&llext_lock);
	return NULL;
}

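/*
 * Walk the list of loaded extensions under the llext lock and invoke the
 * callback on each one. Iteration stops early if the callback returns a
 * non-zero value, which is then propagated to the caller.
 */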
int llext_iterate(int (*fn)(struct llext *ext, void *arg), void *arg)
{
	sys_snode_t *node;
	int ret = 0;

	k_mutex_lock(&llext_lock, K_FOREVER);

	for (node = sys_slist_peek_head(&_llext_list);
	     node;
	     node = sys_slist_peek_next(node)) {
		struct llext *ext = CONTAINER_OF(node, struct llext, _llext_list);

		ret = fn(ext, arg);
		if (ret) {
			break;
		}
	}

	k_mutex_unlock(&llext_lock);
	return ret;
}

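/*
 * Look up a symbol by name. With a NULL symbol table the built-in export
 * table is searched: by SLID if CONFIG_LLEXT_EXPORT_BUILTINS_BY_SLID is
 * enabled, by string comparison otherwise. With a non-NULL table the
 * extension's own symbols are searched. Returns NULL when nothing matches.
 */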
const void *llext_find_sym(const struct llext_symtable *sym_table, const char *sym_name)
{
	if (sym_table == NULL) {
		/* Built-in symbol table */
#ifdef CONFIG_LLEXT_EXPORT_BUILTINS_BY_SLID
		/* 'sym_name' is actually a SLID to search for */
		uintptr_t slid = (uintptr_t)sym_name;

		/* TODO: perform a binary search instead of linear.
		 * Note that - as of writing - the llext_const_symbol_area
		 * section is sorted in ascending SLID order.
		 * (see scripts/build/llext_prepare_exptab.py)
		 */
		STRUCT_SECTION_FOREACH(llext_const_symbol, sym) {
			if (slid == sym->slid) {
				return sym->addr;
			}
		}
#else
		STRUCT_SECTION_FOREACH(llext_const_symbol, sym) {
			if (strcmp(sym->name, sym_name) == 0) {
				return sym->addr;
			}
		}
#endif
	} else {
		/* find symbols in module */
		for (size_t i = 0; i < sym_table->sym_cnt; i++) {
			if (strcmp(sym_table->syms[i].name, sym_name) == 0) {
				return sym_table->syms[i].addr;
			}
		}
	}

	return NULL;
}

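/*
 * Load an extension or, if one with the same name is already on the list,
 * just take an extra reference to it. On success *ext points at the
 * extension and a non-negative value is returned; on failure a negative
 * errno value is returned and *ext is NULL.
 *
 * Typical call sequence (illustrative sketch only; LLEXT_BUF_LOADER and
 * LLEXT_LOAD_PARAM_DEFAULT are the buffer-loader and default-parameter
 * helpers from the llext headers):
 *
 *   struct llext_buf_loader buf_loader = LLEXT_BUF_LOADER(elf_data, elf_len);
 *   struct llext_load_param ldr_parm = LLEXT_LOAD_PARAM_DEFAULT;
 *   struct llext *ext;
 *   int ret = llext_load(&buf_loader.loader, "my_ext", &ext, &ldr_parm);
 */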
int llext_load(struct llext_loader *ldr, const char *name, struct llext **ext,
	       const struct llext_load_param *ldr_parm)
{
	int ret;

	*ext = llext_by_name(name);

	k_mutex_lock(&llext_lock, K_FOREVER);

	if (*ext) {
		/* The use count is at least 1 */
		ret = (*ext)->use_count++;
		goto out;
	}

	*ext = llext_alloc(sizeof(struct llext));
	if (*ext == NULL) {
		LOG_ERR("Not enough memory for extension metadata");
		ret = -ENOMEM;
		goto out;
	}

	ret = do_llext_load(ldr, *ext, ldr_parm);
	if (ret < 0) {
		llext_free(*ext);
		*ext = NULL;
		goto out;
	}

	strncpy((*ext)->name, name, sizeof((*ext)->name));
	(*ext)->name[sizeof((*ext)->name) - 1] = '\0';
	(*ext)->use_count++;

	sys_slist_append(&_llext_list, &(*ext)->_llext_list);
	LOG_INF("Loaded extension %s", (*ext)->name);

out:
	k_mutex_unlock(&llext_lock);
	return ret;
}

#include <zephyr/logging/log_ctrl.h>

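/*
 * In deferred logging mode, queued messages may still reference strings that
 * live in the extension's memory regions. Flush the logger before unloading
 * so those references are not processed after the memory has been freed.
 */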
static void llext_log_flush(void)
{
#ifdef CONFIG_LOG_MODE_DEFERRED
	extern struct k_thread logging_thread;
	int cur_prio = k_thread_priority_get(k_current_get());
	int log_prio = k_thread_priority_get(&logging_thread);
	int target_prio;
	bool adjust_cur, adjust_log;

	/*
	 * Our goal is to raise the logger thread priority above the current
	 * one, but if the current thread already has the highest possible
	 * priority, both need to be adjusted, particularly if the logger
	 * thread has the lowest possible priority
	 */
	if (log_prio < cur_prio) {
		adjust_cur = false;
		adjust_log = false;
		target_prio = 0;
	} else if (cur_prio == K_HIGHEST_THREAD_PRIO) {
		adjust_cur = true;
		adjust_log = true;
		target_prio = cur_prio;
		k_thread_priority_set(k_current_get(), cur_prio + 1);
	} else {
		adjust_cur = false;
		adjust_log = true;
		target_prio = cur_prio - 1;
	}

	/* adjust logging thread priority if needed */
	if (adjust_log) {
		k_thread_priority_set(&logging_thread, target_prio);
	}

	log_thread_trigger();
	k_yield();

	/* restore the priorities that were changed above */
	if (adjust_log) {
		k_thread_priority_set(&logging_thread, log_prio);
	}
	if (adjust_cur) {
		k_thread_priority_set(k_current_get(), cur_prio);
	}
#endif
}

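/*
 * Drop one reference to the extension. Resources are only released when the
 * use count reaches zero; otherwise the remaining use count is returned and
 * the extension stays loaded.
 */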
int llext_unload(struct llext **ext)
{
	__ASSERT(*ext, "Expected non-null extension");
	struct llext *tmp = *ext;

	k_mutex_lock(&llext_lock, K_FOREVER);

	llext_log_flush();

	__ASSERT(tmp->use_count, "A valid LLEXT cannot have a zero use-count!");

	if (tmp->use_count-- != 1) {
		unsigned int ret = tmp->use_count;

		k_mutex_unlock(&llext_lock);
		return ret;
	}

	/* FIXME: protect the global list */
	sys_slist_find_and_remove(&_llext_list, &tmp->_llext_list);

	llext_dependency_remove_all(tmp);

	*ext = NULL;
	k_mutex_unlock(&llext_lock);

	llext_free_regions(tmp);
	llext_free(tmp->sym_tab.syms);
	llext_free(tmp->exp_tab.syms);
	llext_free(tmp);

	return 0;
}

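/*
 * Resolve a void(void) function exported by the extension and call it.
 * Returns -ENOENT if the symbol is not found in the export table.
 */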
int llext_call_fn(struct llext *ext, const char *sym_name)
{
	void (*fn)(void);

	fn = llext_find_sym(&ext->exp_tab, sym_name);
	if (fn == NULL) {
		return -ENOENT;
	}
	fn();

	return 0;
}

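/*
 * Run the extension's table of setup or teardown functions (typically the
 * ELF init_array/fini_array entries). The required table size is queried
 * first, then the entries are copied into a stack-allocated array and each
 * function is called in order.
 */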
static int call_fn_table(struct llext *ext, bool is_init)
{
	ssize_t ret;

	ret = llext_get_fn_table(ext, is_init, NULL, 0);
	if (ret < 0) {
		LOG_ERR("Failed to get table size: %d", (int)ret);
		return ret;
	}

	typedef void (*elf_void_fn_t)(void);

	int fn_count = ret / sizeof(elf_void_fn_t);
	elf_void_fn_t fn_table[fn_count];

	ret = llext_get_fn_table(ext, is_init, &fn_table, sizeof(fn_table));
	if (ret < 0) {
		LOG_ERR("Failed to get function table: %d", (int)ret);
		return ret;
	}

	for (int i = 0; i < fn_count; i++) {
		LOG_DBG("calling %s function %p()",
			is_init ? "bringup" : "teardown", (void *)fn_table[i]);
		fn_table[i]();
	}

	return 0;
}

inline int llext_bringup(struct llext *ext)
{
	return call_fn_table(ext, true);
}

inline int llext_teardown(struct llext *ext)
{
	return call_fn_table(ext, false);
}

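/*
 * Run an extension end to end: call its bring-up functions, then the given
 * entry point with user_data, then its teardown functions. Errors from
 * bring-up or teardown are logged and abort the sequence.
 */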
void llext_bootstrap(struct llext *ext, llext_entry_fn_t entry_fn, void *user_data)
{
	int ret;

	/* Call initialization functions */
	ret = llext_bringup(ext);
	if (ret < 0) {
		LOG_ERR("Failed to call init functions: %d", ret);
		return;
	}

	/* Start extension main function */
	LOG_DBG("calling entry function %p(%p)", (void *)entry_fn, user_data);
	entry_fn(user_data);

	/* Call de-initialization functions */
	ret = llext_teardown(ext);
	if (ret < 0) {
		LOG_ERR("Failed to call de-init functions: %d", ret);
		return;
	}
}