/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */


#include <kernel.h>
#include <string.h>
#include <sys/math_extras.h>
#include <sys/rb.h>
#include <kernel_structs.h>
#include <sys/sys_io.h>
#include <ksched.h>
#include <syscall.h>
#include <syscall_handler.h>
#include <device.h>
#include <init.h>
#include <stdbool.h>
#include <app_memory/app_memdomain.h>
#include <sys/libc-hooks.h>
#include <sys/mutex.h>
#include <inttypes.h>
#include <linker/linker-defs.h>

#ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition);
#endif

/* TODO: Find a better place to put this. Since we pull all of the
 * lib..__modules__crypto__mbedtls.a globals into the app shared
 * memory section, we can't put this in zephyr_init.c of the mbedtls
 * module.
 */
#ifdef CONFIG_MBEDTLS
K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition);
#endif

#include <logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* The original synchronization strategy made heavy use of recursive
 * irq_locking, which ports poorly to spinlocks, which are
 * non-recursive. Rather than try to redesign as part of
 * spinlockification, this uses multiple locks to preserve the
 * original semantics exactly. The locks are named for the data they
 * protect where possible, or just for the code that uses them where
 * not.
 */
#ifdef CONFIG_DYNAMIC_OBJECTS
static struct k_spinlock lists_lock;   /* kobj rbtree/dlist */
static struct k_spinlock objfree_lock; /* k_object_free */
#endif
static struct k_spinlock obj_lock;     /* kobj struct data */

#define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * 8)

#ifdef CONFIG_DYNAMIC_OBJECTS
extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif

static void clear_perms_cb(struct z_object *ko, void *ctx_ptr);

const char *otype_to_str(enum k_objects otype)
{
	const char *ret;
	/* -fdata-sections doesn't work right except in very very recent
	 * GCC and these literal strings would appear in the binary even if
	 * otype_to_str was omitted by the linker
	 */
#ifdef CONFIG_LOG
	switch (otype) {
	/* otype-to-str.h is generated automatically during build by
	 * gen_kobject_list.py
	 */
	case K_OBJ_ANY:
		ret = "generic";
		break;
#include <otype-to-str.h>
	default:
		ret = "?";
		break;
	}
#else
	ARG_UNUSED(otype);
	ret = NULL;
#endif
	return ret;
}

struct perm_ctx {
	int parent_id;
	int child_id;
	struct k_thread *parent;
};

#ifdef CONFIG_GEN_PRIV_STACKS
/* See write_gperf_table() in scripts/gen_kobject_list.py. The privilege
 * mode stacks are allocated as an array. The base of the array is
 * aligned to Z_PRIVILEGE_STACK_ALIGN, and all members must be as well.
 */
uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
	struct z_object *obj = z_object_find(stack);

	__ASSERT(obj != NULL, "stack object not found");
	__ASSERT(obj->type == K_OBJ_THREAD_STACK_ELEMENT,
		 "bad stack object");

	return obj->data.stack_data->priv;
}
#endif /* CONFIG_GEN_PRIV_STACKS */
112
Andrew Boie31bdfc02017-11-08 16:38:03 -0800113#ifdef CONFIG_DYNAMIC_OBJECTS
Daniel Leungfe477ea2020-12-15 13:50:48 -0800114
115/*
116 * Note that dyn_obj->data is where the kernel object resides
117 * so it is the one that actually needs to be aligned.
118 * Due to the need to get the the fields inside struct dyn_obj
119 * from kernel object pointers (i.e. from data[]), the offset
120 * from data[] needs to be fixed at build time. Therefore,
121 * data[] is declared with __aligned(), such that when dyn_obj
122 * is allocated with alignment, data[] is also aligned.
123 * Due to this requirement, data[] needs to be aligned with
124 * the maximum alignment needed for all kernel objects
125 * (hence the following DYN_OBJ_DATA_ALIGN).
126 */
127#ifdef ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT
128#define DYN_OBJ_DATA_ALIGN_K_THREAD (ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT)
129#else
130#define DYN_OBJ_DATA_ALIGN_K_THREAD (sizeof(void *))
131#endif
132
133#define DYN_OBJ_DATA_ALIGN \
134 MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))
135
Andrew Boie31bdfc02017-11-08 16:38:03 -0800136struct dyn_obj {
Andrew Boie2dc2ecf2020-03-11 07:13:07 -0700137 struct z_object kobj;
Daniel Leungabfe0452021-04-27 11:49:30 -0700138 sys_dnode_t dobj_list;
Andrew Boie31bdfc02017-11-08 16:38:03 -0800139 struct rbnode node; /* must be immediately before data member */
Daniel Leungfe477ea2020-12-15 13:50:48 -0800140
141 /* The object itself */
142 uint8_t data[] __aligned(DYN_OBJ_DATA_ALIGN_K_THREAD);
Andrew Boie31bdfc02017-11-08 16:38:03 -0800143};
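
/* Illustrative note: given an object pointer handed out to the rest of
 * the kernel (which points at data[] above), the bookkeeping fields are
 * recovered with pointer arithmetic, e.g.
 *
 *	struct dyn_obj *dyn = CONTAINER_OF(obj, struct dyn_obj, data);
 *
 * as done by dyn_obj_to_node() below. This only works because the
 * offset of data[] within struct dyn_obj is fixed at build time, per
 * the alignment comment above.
 */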

extern struct z_object *z_object_gperf_find(const void *obj);
extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
					    void *context);

static bool node_lessthan(struct rbnode *a, struct rbnode *b);

/*
 * Red/black tree of allocated kernel objects, for reasonably fast lookups
 * based on object pointer values.
 */
static struct rbtree obj_rb_tree = {
	.lessthan_fn = node_lessthan
};

/*
 * Linked list of allocated kernel objects, for iteration over all allocated
 * objects (and potentially deleting them during iteration).
 */
static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);

/*
 * TODO: Write some hash table code that will replace both obj_rb_tree
 * and obj_list.
 */

static size_t obj_size_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
#include <otype-to-size.h>
	default:
		ret = sizeof(const struct device);
		break;
	}

	return ret;
}

static size_t obj_align_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
	case K_OBJ_THREAD:
#ifdef ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT
		ret = ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT;
#else
		ret = sizeof(void *);
#endif
		break;
	default:
		ret = sizeof(void *);
		break;
	}

	return ret;
}

static bool node_lessthan(struct rbnode *a, struct rbnode *b)
{
	return a < b;
}

static inline struct dyn_obj *node_to_dyn_obj(struct rbnode *node)
{
	return CONTAINER_OF(node, struct dyn_obj, node);
}

static inline struct rbnode *dyn_obj_to_node(void *obj)
{
	struct dyn_obj *dobj = CONTAINER_OF(obj, struct dyn_obj, data);

	return &dobj->node;
}

static struct dyn_obj *dyn_object_find(void *obj)
{
	struct rbnode *node;
	struct dyn_obj *ret;

	/* For any dynamically allocated kernel object, the object
	 * pointer is just a member of the containing struct dyn_obj,
	 * so just a little arithmetic is necessary to locate the
	 * corresponding struct rbnode
	 */
	node = dyn_obj_to_node(obj);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);
	if (rb_contains(&obj_rb_tree, node)) {
		ret = node_to_dyn_obj(node);
	} else {
		ret = NULL;
	}
	k_spin_unlock(&lists_lock, key);

	return ret;
}

/**
 * @internal
 *
 * @brief Allocate a new thread index for a new thread.
 *
 * This finds an unused thread index that can be assigned to a new
 * thread. If too many threads have been allocated, the kernel will
 * run out of indexes and this function will fail.
 *
 * Note that if an unused index is found, that index will be marked as
 * used after return of this function.
 *
 * @param tidx The new thread index if successful
 *
 * @return true if successful, false if failed
 **/
static bool thread_idx_alloc(uintptr_t *tidx)
{
	int i;
	int idx;
	int base;

	base = 0;
	for (i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		idx = find_lsb_set(_thread_idx_map[i]);

		if (idx != 0) {
			*tidx = base + (idx - 1);

			sys_bitfield_clear_bit((mem_addr_t)_thread_idx_map,
					       *tidx);

			/* Clear permission from all objects */
			z_object_wordlist_foreach(clear_perms_cb,
						  (void *)*tidx);

			return true;
		}

		base += 8;
	}

	return false;
}
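
/* Worked example (illustrative numbers): with CONFIG_MAX_THREAD_BYTES
 * of 2 and _thread_idx_map = { 0x00, 0x0c }, byte 0 has no free bits;
 * in byte 1 find_lsb_set() returns 3 (positions are 1-based), so
 * *tidx = 8 + (3 - 1) = 10, and that bit is then cleared to mark
 * index 10 as in use.
 */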

/**
 * @internal
 *
 * @brief Free a thread index.
 *
 * This frees a thread index so it can be used by another
 * thread.
 *
 * @param tidx The thread index to be freed
 **/
static void thread_idx_free(uintptr_t tidx)
{
	/* Prevent leaked permissions when the index is recycled */
	z_object_wordlist_foreach(clear_perms_cb, (void *)tidx);

	sys_bitfield_set_bit((mem_addr_t)_thread_idx_map, tidx);
}

struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
{
	struct dyn_obj *dyn;

	dyn = z_thread_aligned_alloc(align, sizeof(*dyn) + size);
	if (dyn == NULL) {
		LOG_ERR("could not allocate kernel object, out of memory");
		return NULL;
	}

	dyn->kobj.name = &dyn->data;
	dyn->kobj.type = K_OBJ_ANY;
	dyn->kobj.flags = 0;
	(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	rb_insert(&obj_rb_tree, &dyn->node);
	sys_dlist_append(&obj_list, &dyn->dobj_list);
	k_spin_unlock(&lists_lock, key);

	return &dyn->kobj;
}
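
/* Usage sketch (hypothetical values): callers needing a dynamic object
 * with a specific alignment use this directly, e.g. a 64-byte object
 * aligned to 16 bytes:
 *
 *	struct z_object *zo = z_dynamic_object_aligned_create(16, 64);
 *	void *obj = (zo != NULL) ? zo->name : NULL;
 *
 * zo->name points at the usable object memory (data[] above); the
 * z_object header itself is kernel bookkeeping.
 */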

void *z_impl_k_object_alloc(enum k_objects otype)
{
	struct z_object *zo;
	uintptr_t tidx = 0;

	if (otype <= K_OBJ_ANY || otype >= K_OBJ_LAST) {
		LOG_ERR("bad object type %d requested", otype);
		return NULL;
	}

	switch (otype) {
	case K_OBJ_THREAD:
		if (!thread_idx_alloc(&tidx)) {
			LOG_ERR("out of free thread indexes");
			return NULL;
		}
		break;
	/* The following are currently not allowed at all */
	case K_OBJ_FUTEX:			/* Lives in user memory */
	case K_OBJ_SYS_MUTEX:			/* Lives in user memory */
	case K_OBJ_THREAD_STACK_ELEMENT:	/* No aligned allocator */
	case K_OBJ_NET_SOCKET:			/* Indeterminate size */
		LOG_ERR("forbidden object type '%s' requested",
			otype_to_str(otype));
		return NULL;
	default:
		/* Remainder within bounds are permitted */
		break;
	}

	zo = z_dynamic_object_aligned_create(obj_align_get(otype),
					     obj_size_get(otype));
	if (zo == NULL) {
		return NULL;
	}
	zo->type = otype;

	if (otype == K_OBJ_THREAD) {
		zo->data.thread_id = tidx;
	}

	/* The allocating thread implicitly gets permission on kernel objects
	 * that it allocates
	 */
	z_thread_perms_set(zo, _current);

	/* Activates reference counting logic for automatic disposal when
	 * all permissions have been revoked
	 */
	zo->flags |= K_OBJ_FLAG_ALLOC;

	return zo->name;
}
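
/* Usage sketch (assumes CONFIG_DYNAMIC_OBJECTS and a configured thread
 * resource pool): allocating a semaphore at runtime via the public
 * k_object_alloc() wrapper instead of declaring it statically:
 *
 *	struct k_sem *sem = k_object_alloc(K_OBJ_SEM);
 *
 *	if (sem != NULL) {
 *		k_sem_init(sem, 0, 1);
 *	}
 *
 * The returned pointer is the object itself (zo->name above), and the
 * allocating thread already holds permission on it.
 */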

void k_object_free(void *obj)
{
	struct dyn_obj *dyn;

	/* This function is intentionally not exposed to user mode.
	 * There's currently no robust way to track that an object isn't
	 * being used by some other thread
	 */

	k_spinlock_key_t key = k_spin_lock(&objfree_lock);

	dyn = dyn_object_find(obj);
	if (dyn != NULL) {
		rb_remove(&obj_rb_tree, &dyn->node);
		sys_dlist_remove(&dyn->dobj_list);

		if (dyn->kobj.type == K_OBJ_THREAD) {
			thread_idx_free(dyn->kobj.data.thread_id);
		}
	}
	k_spin_unlock(&objfree_lock, key);

	if (dyn != NULL) {
		k_free(dyn);
	}
}
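
/* Design note: the k_free() above deliberately happens after
 * objfree_lock is dropped. At that point the object is unlinked from
 * both the rbtree and the dlist, so no other thread can look it up,
 * and freeing outside the spinlock keeps the critical section short.
 */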

struct z_object *z_object_find(const void *obj)
{
	struct z_object *ret;

	ret = z_object_gperf_find(obj);

	if (ret == NULL) {
		struct dyn_obj *dynamic_obj;

		/* The cast to pointer-to-non-const violates MISRA
		 * 11.8 but is justified since we know dynamic objects
		 * were not declared with a const qualifier.
		 */
		dynamic_obj = dyn_object_find((void *)obj);
		if (dynamic_obj != NULL) {
			ret = &dynamic_obj->kobj;
		}
	}

	return ret;
}

void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	struct dyn_obj *obj, *next;

	z_object_gperf_wordlist_foreach(func, context);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&obj_list, obj, next, dobj_list) {
		func(&obj->kobj, context);
	}
	k_spin_unlock(&lists_lock, key);
}
#endif /* CONFIG_DYNAMIC_OBJECTS */
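
/* Sketch of a wordlist callback (illustrative only): the iterator above
 * visits every statically declared object in the gperf table plus every
 * dynamic object, so counting allocated thread objects could look like:
 *
 *	static void count_threads_cb(struct z_object *ko, void *ctx_ptr)
 *	{
 *		if (ko->type == K_OBJ_THREAD) {
 *			(*(int *)ctx_ptr)++;
 *		}
 *	}
 *
 *	int count = 0;
 *	z_object_wordlist_foreach(count_threads_cb, &count);
 */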

static unsigned int thread_index_get(struct k_thread *thread)
{
	struct z_object *ko;

	ko = z_object_find(thread);

	if (ko == NULL) {
		return -1;
	}

	return ko->data.thread_id;
}

static void unref_check(struct z_object *ko, uintptr_t index)
{
	k_spinlock_key_t key = k_spin_lock(&obj_lock);

	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);

#ifdef CONFIG_DYNAMIC_OBJECTS
	struct dyn_obj *dyn =
		CONTAINER_OF(ko, struct dyn_obj, kobj);

	if ((ko->flags & K_OBJ_FLAG_ALLOC) == 0U) {
		goto out;
	}

	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		if (ko->perms[i] != 0U) {
			goto out;
		}
	}

	/* This object has no more references. Some objects may have
	 * dynamically allocated resources, require cleanup, or need to be
	 * marked as uninitialized when all references are gone. What
	 * specifically needs to happen depends on the object type.
	 */
	switch (ko->type) {
	case K_OBJ_PIPE:
		k_pipe_cleanup((struct k_pipe *)ko->name);
		break;
	case K_OBJ_MSGQ:
		k_msgq_cleanup((struct k_msgq *)ko->name);
		break;
	case K_OBJ_STACK:
		k_stack_cleanup((struct k_stack *)ko->name);
		break;
	default:
		/* Nothing to do */
		break;
	}

	rb_remove(&obj_rb_tree, &dyn->node);
	sys_dlist_remove(&dyn->dobj_list);
	k_free(dyn);
out:
#endif
	k_spin_unlock(&obj_lock, key);
}
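
/* Consequence of unref_check(): for objects created with
 * K_OBJ_FLAG_ALLOC set, clearing the last permission bit (e.g. when
 * the holding threads exit and their permissions are swept) is what
 * actually releases the memory; there is no separate user-facing
 * destructor for dynamic objects other than k_object_free().
 */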

static void wordlist_cb(struct z_object *ko, void *ctx_ptr)
{
	struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;

	if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
	    (struct k_thread *)ko->name != ctx->parent) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
	}
}

void z_thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
{
	struct perm_ctx ctx = {
		thread_index_get(parent),
		thread_index_get(child),
		parent
	};

	if ((ctx.parent_id != -1) && (ctx.child_id != -1)) {
		z_object_wordlist_foreach(wordlist_cb, &ctx);
	}
}

void z_thread_perms_set(struct z_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, index);
	}
}

void z_thread_perms_clear(struct z_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);
		unref_check(ko, index);
	}
}

static void clear_perms_cb(struct z_object *ko, void *ctx_ptr)
{
	uintptr_t id = (uintptr_t)ctx_ptr;

	unref_check(ko, id);
}

void z_thread_perms_all_clear(struct k_thread *thread)
{
	uintptr_t index = thread_index_get(thread);

	if ((int)index != -1) {
		z_object_wordlist_foreach(clear_perms_cb, (void *)index);
	}
}

static int thread_perms_test(struct z_object *ko)
{
	int index;

	if ((ko->flags & K_OBJ_FLAG_PUBLIC) != 0U) {
		return 1;
	}

	index = thread_index_get(_current);
	if (index != -1) {
		return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index);
	}
	return 0;
}

static void dump_permission_error(struct z_object *ko)
{
	int index = thread_index_get(_current);

	LOG_ERR("thread %p (%d) does not have permission on %s %p",
		_current, index,
		otype_to_str(ko->type), ko->name);
	LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap");
}

void z_dump_object_error(int retval, const void *obj, struct z_object *ko,
			 enum k_objects otype)
{
	switch (retval) {
	case -EBADF:
		LOG_ERR("%p is not a valid %s", obj, otype_to_str(otype));
		if (ko == NULL) {
			LOG_ERR("address is not a known kernel object");
		} else {
			LOG_ERR("address is actually a %s",
				otype_to_str(ko->type));
		}
		break;
	case -EPERM:
		dump_permission_error(ko);
		break;
	case -EINVAL:
		LOG_ERR("%p used before initialization", obj);
		break;
	case -EADDRINUSE:
		LOG_ERR("%p %s in use", obj, otype_to_str(otype));
		break;
	default:
		/* Unhandled error */
		break;
	}
}

void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)
{
	struct z_object *ko = z_object_find(object);

	if (ko != NULL) {
		z_thread_perms_set(ko, thread);
	}
}

void k_object_access_revoke(const void *object, struct k_thread *thread)
{
	struct z_object *ko = z_object_find(object);

	if (ko != NULL) {
		z_thread_perms_clear(ko, thread);
	}
}

void z_impl_k_object_release(const void *object)
{
	k_object_access_revoke(object, _current);
}

void k_object_access_all_grant(const void *object)
{
	struct z_object *ko = z_object_find(object);

	if (ko != NULL) {
		ko->flags |= K_OBJ_FLAG_PUBLIC;
	}
}
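
/* Typical supervisor-side setup (sketch; my_sem, other_sem and
 * user_thread are hypothetical): grant a user thread access to an
 * object before it needs it, or make an object public to all threads,
 * which bypasses the permission check entirely and should be used
 * sparingly:
 *
 *	K_SEM_DEFINE(my_sem, 0, 1);
 *
 *	k_object_access_grant(&my_sem, &user_thread);
 *	k_object_access_all_grant(&other_sem);
 */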

int z_object_validate(struct z_object *ko, enum k_objects otype,
		      enum _obj_init_check init)
{
	if (unlikely((ko == NULL) ||
		     (otype != K_OBJ_ANY && ko->type != otype))) {
		return -EBADF;
	}

	/* Manipulation of any kernel objects by a user thread requires that
	 * thread be granted access first, even for uninitialized objects
	 */
	if (unlikely(thread_perms_test(ko) == 0)) {
		return -EPERM;
	}

	/* Initialization state checks. For _OBJ_INIT_ANY, we don't care. */
	if (likely(init == _OBJ_INIT_TRUE)) {
		/* Object MUST be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0U)) {
			return -EINVAL;
		}
	} else if (init == _OBJ_INIT_FALSE) {
		/* Object MUST NOT be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) != 0U)) {
			return -EADDRINUSE;
		}
	} else {
		/* _OBJ_INIT_ANY */
	}

	return 0;
}
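
/* How this is used (simplified sketch of the syscall machinery):
 * verification handlers validate an object pointer before acting on it
 * and report failures via z_dump_object_error(), roughly:
 *
 *	struct z_object *ko = z_object_find(obj);
 *	int ret = z_object_validate(ko, K_OBJ_SEM, _OBJ_INIT_TRUE);
 *
 *	if (ret != 0) {
 *		z_dump_object_error(ret, obj, ko, K_OBJ_SEM);
 *		// ...and the calling thread is then oops'd
 *	}
 *
 * In the real tree this pattern is wrapped by the Z_SYSCALL_OBJ()
 * family of macros in syscall_handler.h rather than written by hand.
 */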

void z_object_init(const void *obj)
{
	struct z_object *ko;

	/* By the time we get here, if the caller was from userspace, all the
	 * necessary checks have been done in z_object_validate(), which takes
	 * place before the object is initialized.
	 *
	 * This function runs after the object has been initialized and
	 * finalizes it
	 */

	ko = z_object_find(obj);
	if (ko == NULL) {
		/* Supervisor threads can ignore rules about kernel objects
		 * and may declare them on stacks, etc. Such objects will never
		 * be usable from userspace, but we shouldn't explode.
		 */
		return;
	}

	/* Allows non-initialization system calls to be made on this object */
	ko->flags |= K_OBJ_FLAG_INITIALIZED;
}

void z_object_recycle(const void *obj)
{
	struct z_object *ko = z_object_find(obj);

	if (ko != NULL) {
		(void)memset(ko->perms, 0, sizeof(ko->perms));
		z_thread_perms_set(ko, k_current_get());
		ko->flags |= K_OBJ_FLAG_INITIALIZED;
	}
}

void z_object_uninit(const void *obj)
{
	struct z_object *ko;

	/* See comments in z_object_init() */
	ko = z_object_find(obj);
	if (ko == NULL) {
		return;
	}

	ko->flags &= ~K_OBJ_FLAG_INITIALIZED;
}

/*
 * Copy to/from helper functions used in syscall handlers
 */
void *z_user_alloc_from_copy(const void *src, size_t size)
{
	void *dst = NULL;

	/* Does the caller in user mode have access to read this memory? */
	if (Z_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	dst = z_thread_malloc(size);
	if (dst == NULL) {
		LOG_ERR("out of thread resource pool memory (%zu)", size);
		goto out_err;
	}

	(void)memcpy(dst, src, size);
out_err:
	return dst;
}

static int user_copy(void *dst, const void *src, size_t size, bool to_user)
{
	int ret = EFAULT;

	/* Does the caller in user mode have access to this memory? */
	if (to_user ? Z_SYSCALL_MEMORY_WRITE(dst, size) :
	    Z_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	(void)memcpy(dst, src, size);
	ret = 0;
out_err:
	return ret;
}

int z_user_from_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, false);
}

int z_user_to_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, true);
}
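
/* Sketch of a syscall verification handler using these helpers
 * (struct my_args and user_ptr are hypothetical). Data is staged
 * through a kernel-side copy so user memory is only touched under the
 * access checks above:
 *
 *	struct my_args kargs;
 *
 *	if (z_user_from_copy(&kargs, user_ptr, sizeof(kargs)) != 0) {
 *		// user buffer was not readable; fail the syscall
 *	}
 *
 * Note these helpers return 0 on success and a positive EFAULT-style
 * value on failure, not a negative errno.
 */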

char *z_user_string_alloc_copy(const char *src, size_t maxlen)
{
	size_t actual_len;
	int err;
	char *ret = NULL;

	actual_len = z_user_string_nlen(src, maxlen, &err);
	if (err != 0) {
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		goto out;
	}

	ret = z_user_alloc_from_copy(src, actual_len);

	/* Someone may have modified the source string during the above
	 * checks. Ensure what we actually copied is still terminated
	 * properly.
	 */
	if (ret != NULL) {
		ret[actual_len - 1U] = '\0';
	}
out:
	return ret;
}

int z_user_string_copy(char *dst, const char *src, size_t maxlen)
{
	size_t actual_len;
	int ret, err;

	actual_len = z_user_string_nlen(src, maxlen, &err);
	if (err != 0) {
		ret = EFAULT;
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		ret = EINVAL;
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		ret = EINVAL;
		goto out;
	}

	ret = z_user_from_copy(dst, src, actual_len);

	/* See comment above in z_user_string_alloc_copy() */
	dst[actual_len - 1] = '\0';
out:
	return ret;
}
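
/* The re-termination above guards against a time-of-check/time-of-use
 * race: user code could lengthen the string between the
 * z_user_string_nlen() check and the copy. A typical caller (sketch,
 * with a hypothetical buffer size):
 *
 *	char name[32];
 *
 *	if (z_user_string_copy(name, user_str, sizeof(name)) != 0) {
 *		// unreadable, unterminated, or longer than the buffer
 *	}
 */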

/*
 * Application memory region initialization
 */

extern char __app_shmem_regions_start[];
extern char __app_shmem_regions_end[];

static int app_shmem_bss_zero(const struct device *unused)
{
	struct z_app_region *region, *end;

	ARG_UNUSED(unused);

	end = (struct z_app_region *)&__app_shmem_regions_end;
	region = (struct z_app_region *)&__app_shmem_regions_start;

	for ( ; region < end; region++) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		/* When BSS sections are not present at boot, we need to
		 * wait for the paging mechanism to be initialized before
		 * we can zero out BSS.
		 */
		extern bool z_sys_post_kernel;
		bool do_clear = z_sys_post_kernel;

		/* During pre-kernel init, z_sys_post_kernel == false:
		 * clear regions inside the pinned area (they are present
		 * at boot) and skip the rest. During post-kernel init,
		 * z_sys_post_kernel == true: skip regions inside the
		 * pinned area, as they have already been cleared and are
		 * possibly already in use, and clear the rest.
		 */
		if (((uint8_t *)region->bss_start >= (uint8_t *)_app_smem_pinned_start) &&
		    ((uint8_t *)region->bss_start < (uint8_t *)_app_smem_pinned_end)) {
			do_clear = !do_clear;
		}

		if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
		{
			(void)memset(region->bss_start, 0, region->bss_size);
		}
	}

	return 0;
}

SYS_INIT(app_shmem_bss_zero, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* When BSS sections are not present at boot, we need to wait for the
 * paging mechanism to be initialized before we can zero out BSS.
 */
SYS_INIT(app_shmem_bss_zero, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

/*
 * Default handlers if otherwise unimplemented
 */

static uintptr_t handler_bad_syscall(uintptr_t bad_id, uintptr_t arg2,
				     uintptr_t arg3, uintptr_t arg4,
				     uintptr_t arg5, uintptr_t arg6,
				     void *ssf)
{
	LOG_ERR("Bad system call id %" PRIuPTR " invoked", bad_id);
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static uintptr_t handler_no_syscall(uintptr_t arg1, uintptr_t arg2,
				    uintptr_t arg3, uintptr_t arg4,
				    uintptr_t arg5, uintptr_t arg6, void *ssf)
{
	LOG_ERR("Unimplemented system call");
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

#include <syscall_dispatch.c>