/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */


#include <zephyr/kernel.h>
#include <string.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/rb.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/sys/sys_io.h>
#include <ksched.h>
#include <zephyr/syscall.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <stdbool.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/sys/mutex.h>
#include <inttypes.h>
#include <zephyr/linker/linker-defs.h>

#ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition);
#endif /* Z_LIBC_PARTITION_EXISTS */

/* TODO: Find a better place to put this. Since we pull all of the
 * lib..__modules__crypto__mbedtls.a globals into the app shared memory
 * section, we can't put this in zephyr_init.c of the mbedtls module.
 */
#ifdef CONFIG_MBEDTLS
K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition);
#endif /* CONFIG_MBEDTLS */
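
/* Illustrative sketch (not part of this file's logic): application code
 * normally places its own globals into a partition like the ones defined
 * above and adds that partition to a memory domain. The names
 * app_partition and app_domain are hypothetical:
 *
 *	K_APPMEM_PARTITION_DEFINE(app_partition);
 *	K_APP_DMEM(app_partition) int shared_counter;
 *
 *	struct k_mem_domain app_domain;
 *	struct k_mem_partition *parts[] = { &app_partition };
 *
 *	k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);
 */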

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* The original synchronization strategy made heavy use of recursive
 * irq_locking, which ports poorly to spinlocks, which are
 * non-recursive. Rather than try to redesign as part of
 * spinlockification, this uses multiple locks to preserve the
 * original semantics exactly. The locks are named for the data they
 * protect where possible, or just for the code that uses them where
 * not.
 */
#ifdef CONFIG_DYNAMIC_OBJECTS
static struct k_spinlock lists_lock;   /* kobj dlist */
static struct k_spinlock objfree_lock; /* k_object_free */

#ifdef CONFIG_GEN_PRIV_STACKS
/* On ARM & ARC MPU we may have two different alignment requirements
 * when dynamically allocating thread stacks, one for the privileged
 * stack and the other for the user stack, so we need to account for
 * the worst-case alignment scenario and reserve space for that.
 */
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU)
#define STACK_ELEMENT_DATA_SIZE(size) \
	(sizeof(struct z_stack_data) + CONFIG_PRIVILEGED_STACK_SIZE + \
	Z_THREAD_STACK_OBJ_ALIGN(size) + K_THREAD_STACK_LEN(size))
#else
#define STACK_ELEMENT_DATA_SIZE(size) (sizeof(struct z_stack_data) + \
	K_THREAD_STACK_LEN(size))
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU */
#else
#define STACK_ELEMENT_DATA_SIZE(size) K_THREAD_STACK_LEN(size)
#endif /* CONFIG_GEN_PRIV_STACKS */

#endif /* CONFIG_DYNAMIC_OBJECTS */
static struct k_spinlock obj_lock;     /* kobj struct data */

#define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * 8)

#ifdef CONFIG_DYNAMIC_OBJECTS
extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif /* CONFIG_DYNAMIC_OBJECTS */

static void clear_perms_cb(struct k_object *ko, void *ctx_ptr);

const char *otype_to_str(enum k_objects otype)
{
	const char *ret;
	/* -fdata-sections doesn't work right except in very very recent
	 * GCC and these literal strings would appear in the binary even if
	 * otype_to_str was omitted by the linker
	 */
#ifdef CONFIG_LOG
	switch (otype) {
	/* otype-to-str.h is generated automatically during build by
	 * gen_kobject_list.py
	 */
	case K_OBJ_ANY:
		ret = "generic";
		break;
#include <zephyr/otype-to-str.h>
	default:
		ret = "?";
		break;
	}
#else
	ARG_UNUSED(otype);
	ret = NULL;
#endif /* CONFIG_LOG */
	return ret;
}

struct perm_ctx {
	int parent_id;
	int child_id;
	struct k_thread *parent;
};

#ifdef CONFIG_GEN_PRIV_STACKS
/* See write_gperf_table() in scripts/build/gen_kobject_list.py. The privilege
 * mode stacks are allocated as an array. The base of the array is
 * aligned to Z_PRIVILEGE_STACK_ALIGN, and all members must be as well.
 */
uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
	struct k_object *obj = k_object_find(stack);

	__ASSERT(obj != NULL, "stack object not found");
	__ASSERT(obj->type == K_OBJ_THREAD_STACK_ELEMENT,
		 "bad stack object");

	return obj->data.stack_data->priv;
}
#endif /* CONFIG_GEN_PRIV_STACKS */

#ifdef CONFIG_DYNAMIC_OBJECTS

/*
 * Note that dyn_obj->data is where the kernel object actually resides,
 * so that is the allocation which needs to honor the object's alignment
 * requirement. The object memory is allocated separately from the
 * tracking struct dyn_obj, linked to it via the data pointer, with
 * kobj.name pointing at the object so lookups can map a kernel object
 * pointer back to its tracking data. The alignment used must cover the
 * worst case needed for any kernel object (hence the following
 * DYN_OBJ_DATA_ALIGN).
 */
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
#define DYN_OBJ_DATA_ALIGN_K_THREAD (ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD (sizeof(void *))
#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */

#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
#ifndef CONFIG_MPU_STACK_GUARD
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_PRIVILEGED_STACK_SIZE)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#endif /* !CONFIG_MPU_STACK_GUARD */
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(ARCH_STACK_PTR_ALIGN)
#endif /* CONFIG_DYNAMIC_THREAD_STACK_SIZE */

#define DYN_OBJ_DATA_ALIGN \
	MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))

struct dyn_obj {
	struct k_object kobj;
	sys_dnode_t dobj_list;

	/* The object itself */
	void *data;
};
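
/* Illustrative layout sketch (an assumption drawn from
 * dynamic_object_create() below, not a normative description): a
 * dynamically allocated object consists of two separate heap
 * allocations linked by the data pointer, e.g. for a semaphore:
 *
 *	struct dyn_obj                       object buffer
 *	+------------------+                 +--------------+
 *	| kobj (.name) ----+---------------->| struct k_sem |
 *	| dobj_list        |             ^   +--------------+
 *	| data ------------+-------------+
 *	+------------------+
 *
 * dyn_object_find() recovers the tracking struct by walking obj_list
 * and comparing kobj.name against the user-supplied pointer.
 */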

extern struct k_object *z_object_gperf_find(const void *obj);
extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
					    void *context);

/*
 * Linked list of allocated kernel objects, for iteration over all allocated
 * objects (and potentially deleting them during iteration).
 */
static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);

/*
 * TODO: Write some hash table code that will replace obj_list.
 */

static size_t obj_size_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
#include <zephyr/otype-to-size.h>
	default:
		ret = sizeof(const struct device);
		break;
	}

	return ret;
}

static size_t obj_align_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
	case K_OBJ_THREAD:
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
		ret = ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT;
#else
		ret = __alignof(struct dyn_obj);
#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */
		break;
	default:
		ret = __alignof(struct dyn_obj);
		break;
	}

	return ret;
}

static struct dyn_obj *dyn_object_find(const void *obj)
{
	struct dyn_obj *node;
	k_spinlock_key_t key;

	/* For any dynamically allocated kernel object, the object
	 * pointer is just a member of the containing struct dyn_obj,
	 * so walk the list of allocated objects until a node whose
	 * kobj.name matches the supplied pointer is found.
	 */
	key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER(&obj_list, node, dobj_list) {
		if (node->kobj.name == obj) {
			goto end;
		}
	}

	/* No object found */
	node = NULL;

end:
	k_spin_unlock(&lists_lock, key);

	return node;
}

/**
 * @internal
 *
 * @brief Allocate a new thread index for a new thread.
 *
 * This finds an unused thread index that can be assigned to a new
 * thread. If too many threads have been allocated, the kernel will
 * run out of indexes and this function will fail.
 *
 * Note that if an unused index is found, that index will be marked as
 * used after return of this function.
 *
 * @param tidx The new thread index if successful
 *
 * @return true if successful, false if failed
 **/
static bool thread_idx_alloc(uintptr_t *tidx)
{
	int i;
	int idx;
	int base;

	base = 0;
	for (i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		idx = find_lsb_set(_thread_idx_map[i]);

		if (idx != 0) {
			*tidx = base + (idx - 1);

			/* Clear the bit. We already know the array index,
			 * and the bit to be cleared.
			 */
			_thread_idx_map[i] &= ~(BIT(idx - 1));

			/* Clear permission from all objects */
			k_object_wordlist_foreach(clear_perms_cb,
						  (void *)*tidx);

			return true;
		}

		base += 8;
	}

	return false;
}

/**
 * @internal
 *
 * @brief Free a thread index.
 *
 * This frees a thread index so it can be used by another
 * thread.
 *
 * @param tidx The thread index to be freed
 **/
static void thread_idx_free(uintptr_t tidx)
{
	/* To prevent leaked permission when index is recycled */
	k_object_wordlist_foreach(clear_perms_cb, (void *)tidx);

	/* Figure out which bit to set in _thread_idx_map[] and set it. */
	int base = tidx / NUM_BITS(_thread_idx_map[0]);
	int offset = tidx % NUM_BITS(_thread_idx_map[0]);

	_thread_idx_map[base] |= BIT(offset);
}

static struct k_object *dynamic_object_create(enum k_objects otype, size_t align,
					      size_t size)
{
	struct dyn_obj *dyn;

	dyn = z_thread_aligned_alloc(align, sizeof(struct dyn_obj));
	if (dyn == NULL) {
		return NULL;
	}

	if (otype == K_OBJ_THREAD_STACK_ELEMENT) {
		size_t adjusted_size;

		if (size == 0) {
			k_free(dyn);
			return NULL;
		}

		adjusted_size = STACK_ELEMENT_DATA_SIZE(size);
		dyn->data = z_thread_aligned_alloc(DYN_OBJ_DATA_ALIGN_K_THREAD_STACK,
						   adjusted_size);
		if (dyn->data == NULL) {
			k_free(dyn);
			return NULL;
		}

#ifdef CONFIG_GEN_PRIV_STACKS
		struct z_stack_data *stack_data = (struct z_stack_data *)
			((uint8_t *)dyn->data + adjusted_size - sizeof(*stack_data));
		stack_data->priv = (uint8_t *)dyn->data;
		stack_data->size = adjusted_size;
		dyn->kobj.data.stack_data = stack_data;
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU)
		dyn->kobj.name = (void *)ROUND_UP(
			((uint8_t *)dyn->data + CONFIG_PRIVILEGED_STACK_SIZE),
			Z_THREAD_STACK_OBJ_ALIGN(size));
#else
		dyn->kobj.name = dyn->data;
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU */
#else
		dyn->kobj.name = dyn->data;
		dyn->kobj.data.stack_size = adjusted_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
	} else {
		dyn->data = z_thread_aligned_alloc(align, obj_size_get(otype) + size);
		if (dyn->data == NULL) {
			/* The data allocation failed; free the tracking
			 * struct (dyn->data is NULL here).
			 */
			k_free(dyn);
			return NULL;
		}
		dyn->kobj.name = dyn->data;
	}

	dyn->kobj.type = otype;
	dyn->kobj.flags = 0;
	(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	sys_dlist_append(&obj_list, &dyn->dobj_list);
	k_spin_unlock(&lists_lock, key);

	return &dyn->kobj;
}

struct k_object *k_object_create_dynamic_aligned(size_t align, size_t size)
{
	struct k_object *obj = dynamic_object_create(K_OBJ_ANY, align, size);

	if (obj == NULL) {
		LOG_ERR("could not allocate kernel object, out of memory");
	}

	return obj;
}

static void *z_object_alloc(enum k_objects otype, size_t size)
{
	struct k_object *zo;
	uintptr_t tidx = 0;

	if ((otype <= K_OBJ_ANY) || (otype >= K_OBJ_LAST)) {
		LOG_ERR("bad object type %d requested", otype);
		return NULL;
	}

	switch (otype) {
	case K_OBJ_THREAD:
		if (!thread_idx_alloc(&tidx)) {
			LOG_ERR("out of free thread indexes");
			return NULL;
		}
		break;
	/* The following are currently not allowed at all */
	case K_OBJ_FUTEX:      /* Lives in user memory */
	case K_OBJ_SYS_MUTEX:  /* Lives in user memory */
	case K_OBJ_NET_SOCKET: /* Indeterminate size */
		LOG_ERR("forbidden object type '%s' requested",
			otype_to_str(otype));
		return NULL;
	default:
		/* Remainder within bounds are permitted */
		break;
	}

	zo = dynamic_object_create(otype, obj_align_get(otype), size);
	if (zo == NULL) {
		if (otype == K_OBJ_THREAD) {
			thread_idx_free(tidx);
		}
		return NULL;
	}

	if (otype == K_OBJ_THREAD) {
		zo->data.thread_id = tidx;
	}

	/* The allocating thread implicitly gets permission on kernel objects
	 * that it allocates
	 */
	k_thread_perms_set(zo, _current);

	/* Activates reference counting logic for automatic disposal when
	 * all permissions have been revoked
	 */
	zo->flags |= K_OBJ_FLAG_ALLOC;

	return zo->name;
}

void *z_impl_k_object_alloc(enum k_objects otype)
{
	return z_object_alloc(otype, 0);
}

void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)
{
	return z_object_alloc(otype, size);
}

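/* Illustrative usage sketch (assumes CONFIG_DYNAMIC_OBJECTS=y and a
 * sufficiently sized thread resource pool); not part of this file's
 * logic:
 *
 *	struct k_sem *sem = k_object_alloc(K_OBJ_SEM);
 *
 *	if (sem != NULL) {
 *		k_sem_init(sem, 0, 1);
 *		...
 *		k_object_free(sem);
 *	}
 *
 * Per z_object_alloc() above, the allocating thread implicitly receives
 * permission on the object it allocates.
 */
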
void k_object_free(void *obj)
{
	struct dyn_obj *dyn;

	/* This function is intentionally not exposed to user mode.
	 * There's currently no robust way to track that an object isn't
	 * being used by some other thread
	 */

	k_spinlock_key_t key = k_spin_lock(&objfree_lock);

	dyn = dyn_object_find(obj);
	if (dyn != NULL) {
		sys_dlist_remove(&dyn->dobj_list);

		if (dyn->kobj.type == K_OBJ_THREAD) {
			thread_idx_free(dyn->kobj.data.thread_id);
		}
	}
	k_spin_unlock(&objfree_lock, key);

	if (dyn != NULL) {
		k_free(dyn->data);
		k_free(dyn);
	}
}

struct k_object *k_object_find(const void *obj)
{
	struct k_object *ret;

	ret = z_object_gperf_find(obj);

	if (ret == NULL) {
		struct dyn_obj *dyn;

		/* The cast to pointer-to-non-const violates MISRA
		 * 11.8 but is justified since we know dynamic objects
		 * were not declared with a const qualifier.
		 */
		dyn = dyn_object_find(obj);
		if (dyn != NULL) {
			ret = &dyn->kobj;
		}
	}

	return ret;
}

void k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	struct dyn_obj *obj, *next;

	z_object_gperf_wordlist_foreach(func, context);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&obj_list, obj, next, dobj_list) {
		func(&obj->kobj, context);
	}
	k_spin_unlock(&lists_lock, key);
}
#endif /* CONFIG_DYNAMIC_OBJECTS */

static unsigned int thread_index_get(struct k_thread *thread)
{
	struct k_object *ko;

	ko = k_object_find(thread);

	if (ko == NULL) {
		return -1;
	}

	return ko->data.thread_id;
}

static void unref_check(struct k_object *ko, uintptr_t index)
{
	k_spinlock_key_t key = k_spin_lock(&obj_lock);

	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);

#ifdef CONFIG_DYNAMIC_OBJECTS
	if ((ko->flags & K_OBJ_FLAG_ALLOC) == 0U) {
		/* skip unref check for static kernel object */
		goto out;
	}

	void *vko = ko;

	struct dyn_obj *dyn = CONTAINER_OF(vko, struct dyn_obj, kobj);

	__ASSERT(IS_PTR_ALIGNED(dyn, struct dyn_obj), "unaligned z_object");

	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		if (ko->perms[i] != 0U) {
			goto out;
		}
	}

	/* This object has no more references. Some objects may have
	 * dynamically allocated resources, require cleanup, or need to be
	 * marked as uninitialized when all references are gone. What
	 * specifically needs to happen depends on the object type.
	 */
	switch (ko->type) {
#ifdef CONFIG_PIPES
	case K_OBJ_PIPE:
		k_pipe_cleanup((struct k_pipe *)ko->name);
		break;
#endif /* CONFIG_PIPES */
	case K_OBJ_MSGQ:
		k_msgq_cleanup((struct k_msgq *)ko->name);
		break;
	case K_OBJ_STACK:
		k_stack_cleanup((struct k_stack *)ko->name);
		break;
	default:
		/* Nothing to do */
		break;
	}

	sys_dlist_remove(&dyn->dobj_list);
	k_free(dyn->data);
	k_free(dyn);
out:
#endif /* CONFIG_DYNAMIC_OBJECTS */
	k_spin_unlock(&obj_lock, key);
}

static void wordlist_cb(struct k_object *ko, void *ctx_ptr)
{
	struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;

	if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
	    ((struct k_thread *)ko->name != ctx->parent)) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
	}
}

void k_thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
{
	struct perm_ctx ctx = {
		thread_index_get(parent),
		thread_index_get(child),
		parent
	};

	if ((ctx.parent_id != -1) && (ctx.child_id != -1)) {
		k_object_wordlist_foreach(wordlist_cb, &ctx);
	}
}

void k_thread_perms_set(struct k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, index);
	}
}

void k_thread_perms_clear(struct k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);
		unref_check(ko, index);
	}
}

static void clear_perms_cb(struct k_object *ko, void *ctx_ptr)
{
	uintptr_t id = (uintptr_t)ctx_ptr;

	unref_check(ko, id);
}

void k_thread_perms_all_clear(struct k_thread *thread)
{
	uintptr_t index = thread_index_get(thread);

	if ((int)index != -1) {
		k_object_wordlist_foreach(clear_perms_cb, (void *)index);
	}
}

static int thread_perms_test(struct k_object *ko)
{
	int index;

	if ((ko->flags & K_OBJ_FLAG_PUBLIC) != 0U) {
		return 1;
	}

	index = thread_index_get(_current);
	if (index != -1) {
		return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index);
	}
	return 0;
}

static void dump_permission_error(struct k_object *ko)
{
	int index = thread_index_get(_current);
	LOG_ERR("thread %p (%d) does not have permission on %s %p",
		_current, index,
		otype_to_str(ko->type), ko->name);
	LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap");
}

void k_object_dump_error(int retval, const void *obj, struct k_object *ko,
			 enum k_objects otype)
{
	switch (retval) {
	case -EBADF:
		LOG_ERR("%p is not a valid %s", obj, otype_to_str(otype));
		if (ko == NULL) {
			LOG_ERR("address is not a known kernel object");
		} else {
			LOG_ERR("address is actually a %s",
				otype_to_str(ko->type));
		}
		break;
	case -EPERM:
		dump_permission_error(ko);
		break;
	case -EINVAL:
		LOG_ERR("%p used before initialization", obj);
		break;
	case -EADDRINUSE:
		LOG_ERR("%p %s in use", obj, otype_to_str(otype));
		break;
	default:
		/* Unhandled error */
		break;
	}
}

void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		k_thread_perms_set(ko, thread);
	}
}

void k_object_access_revoke(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		k_thread_perms_clear(ko, thread);
	}
}

void z_impl_k_object_release(const void *object)
{
	k_object_access_revoke(object, _current);
}

void k_object_access_all_grant(const void *object)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		ko->flags |= K_OBJ_FLAG_PUBLIC;
	}
}

int k_object_validate(struct k_object *ko, enum k_objects otype,
		      enum _obj_init_check init)
{
	if (unlikely((ko == NULL) ||
		     ((otype != K_OBJ_ANY) && (ko->type != otype)))) {
		return -EBADF;
	}

	/* Manipulation of any kernel objects by a user thread requires that
	 * thread be granted access first, even for uninitialized objects
	 */
	if (unlikely(thread_perms_test(ko) == 0)) {
		return -EPERM;
	}

	/* Initialization state checks. _OBJ_INIT_ANY, we don't care */
	if (likely(init == _OBJ_INIT_TRUE)) {
		/* Object MUST be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0U)) {
			return -EINVAL;
		}
	} else if (init == _OBJ_INIT_FALSE) {
		/* Object MUST NOT be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) != 0U)) {
			return -EADDRINUSE;
		}
	} else {
		/* _OBJ_INIT_ANY */
	}

	return 0;
}

void k_object_init(const void *obj)
{
	struct k_object *ko;

	/* By the time we get here, if the caller was from userspace, all the
	 * necessary checks have been done in k_object_validate(), which takes
	 * place before the object is initialized.
	 *
	 * This function runs after the object has been initialized and
	 * finalizes it
	 */

	ko = k_object_find(obj);
	if (ko == NULL) {
		/* Supervisor threads can ignore rules about kernel objects
		 * and may declare them on stacks, etc. Such objects will never
		 * be usable from userspace, but we shouldn't explode.
		 */
		return;
	}

	/* Allows non-initialization system calls to be made on this object */
	ko->flags |= K_OBJ_FLAG_INITIALIZED;
}

void k_object_recycle(const void *obj)
{
	struct k_object *ko = k_object_find(obj);

	if (ko != NULL) {
		(void)memset(ko->perms, 0, sizeof(ko->perms));
		k_thread_perms_set(ko, _current);
		ko->flags |= K_OBJ_FLAG_INITIALIZED;
	}
}

void k_object_uninit(const void *obj)
{
	struct k_object *ko;

	/* See comments in k_object_init() */
	ko = k_object_find(obj);
	if (ko == NULL) {
		return;
	}

	ko->flags &= ~K_OBJ_FLAG_INITIALIZED;
}

/*
 * Copy to/from helper functions used in syscall handlers
 */
void *k_usermode_alloc_from_copy(const void *src, size_t size)
{
	void *dst = NULL;

	/* Does the caller in user mode have access to read this memory? */
	if (K_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	dst = z_thread_malloc(size);
	if (dst == NULL) {
		LOG_ERR("out of thread resource pool memory (%zu)", size);
		goto out_err;
	}

	(void)memcpy(dst, src, size);
out_err:
	return dst;
}

static int user_copy(void *dst, const void *src, size_t size, bool to_user)
{
	int ret = EFAULT;

	/* Does the caller in user mode have access to this memory? */
	if (to_user ? K_SYSCALL_MEMORY_WRITE(dst, size) :
		      K_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	(void)memcpy(dst, src, size);
	ret = 0;
out_err:
	return ret;
}

int k_usermode_from_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, false);
}

int k_usermode_to_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, true);
}

char *k_usermode_string_alloc_copy(const char *src, size_t maxlen)
{
	size_t actual_len;
	int err;
	char *ret = NULL;

	actual_len = k_usermode_string_nlen(src, maxlen, &err);
	if (err != 0) {
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		goto out;
	}

	ret = k_usermode_alloc_from_copy(src, actual_len);

	/* Someone may have modified the source string during the above
	 * checks. Ensure what we actually copied is still terminated
	 * properly.
	 */
	if (ret != NULL) {
		ret[actual_len - 1U] = '\0';
	}
out:
	return ret;
}

int k_usermode_string_copy(char *dst, const char *src, size_t maxlen)
{
	size_t actual_len;
	int ret, err;

	actual_len = k_usermode_string_nlen(src, maxlen, &err);
	if (err != 0) {
		ret = EFAULT;
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		ret = EINVAL;
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		ret = EINVAL;
		goto out;
	}

	ret = k_usermode_from_copy(dst, src, actual_len);

	/* See comment above in k_usermode_string_alloc_copy() */
	dst[actual_len - 1] = '\0';
out:
	return ret;
}
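
/* Illustrative sketch (user_str is a hypothetical user-supplied
 * pointer): copying in a string whose length is not known up front:
 *
 *	char *name = k_usermode_string_alloc_copy(user_str, 64);
 *
 *	if (name == NULL) {
 *		return -EINVAL;
 *	}
 *
 * The returned buffer is always NUL terminated, even if user code
 * modifies the source string mid-copy, and it comes from the caller's
 * thread resource pool, so release it with k_free() when done.
 */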
925
926/*
Andrew Boie4ce652e2019-02-22 16:08:44 -0800927 * Application memory region initialization
928 */
929
930extern char __app_shmem_regions_start[];
931extern char __app_shmem_regions_end[];
932
Gerard Marull-Paretasa5fd0d12022-10-19 09:33:44 +0200933static int app_shmem_bss_zero(void)
Andrew Boie4ce652e2019-02-22 16:08:44 -0800934{
935 struct z_app_region *region, *end;
936
Andrew Boiefb1c2942020-03-16 11:20:08 -0700937
Hess Nathan32af7242024-04-26 10:23:56 +0200938 end = (struct z_app_region *)&__app_shmem_regions_end[0];
939 region = (struct z_app_region *)&__app_shmem_regions_start[0];
Andrew Boie4ce652e2019-02-22 16:08:44 -0800940
941 for ( ; region < end; region++) {
Daniel Leung2117a2a2021-07-12 13:33:32 -0700942#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		/* When BSS sections are not present at boot, we need to
		 * wait for the paging mechanism to be initialized before
		 * we can zero out BSS.
		 */
		extern bool z_sys_post_kernel;
		bool do_clear = z_sys_post_kernel;

		/* During pre-kernel init, z_sys_post_kernel == false:
		 * clear only the regions within the pinned area and skip
		 * the rest. During post-kernel init, z_sys_post_kernel ==
		 * true: skip regions in the pinned area, as they have
		 * already been cleared and are possibly already in use,
		 * and clear the rest.
		 */
		if (((uint8_t *)region->bss_start >= (uint8_t *)_app_smem_pinned_start) &&
		    ((uint8_t *)region->bss_start < (uint8_t *)_app_smem_pinned_end)) {
			do_clear = !do_clear;
		}

		if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
		{
			(void)memset(region->bss_start, 0, region->bss_size);
		}
	}

	return 0;
}

SYS_INIT_NAMED(app_shmem_bss_zero_pre, app_shmem_bss_zero,
	       PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* When BSS sections are not present at boot, we need to wait for the
 * paging mechanism to be initialized before we can zero out BSS.
 */
SYS_INIT_NAMED(app_shmem_bss_zero_post, app_shmem_bss_zero,
	       POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

/*
 * Default handlers if otherwise unimplemented
 */

static uintptr_t handler_bad_syscall(uintptr_t bad_id, uintptr_t arg2,
				     uintptr_t arg3, uintptr_t arg4,
				     uintptr_t arg5, uintptr_t arg6,
				     void *ssf)
{
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);
	ARG_UNUSED(arg4);
	ARG_UNUSED(arg5);
	ARG_UNUSED(arg6);

	LOG_ERR("Bad system call id %" PRIuPTR " invoked", bad_id);
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static uintptr_t handler_no_syscall(uintptr_t arg1, uintptr_t arg2,
				    uintptr_t arg3, uintptr_t arg4,
				    uintptr_t arg5, uintptr_t arg6, void *ssf)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);
	ARG_UNUSED(arg4);
	ARG_UNUSED(arg5);
	ARG_UNUSED(arg6);

	LOG_ERR("Unimplemented system call");
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

#include <zephyr/syscall_dispatch.c>