/*
 * Copyright (c) 2019 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Internal kernel APIs implemented at the architecture layer.
 *
 * Not all architecture-specific defines are here; APIs that are used
 * by public functions and macros are defined in include/sys/arch_interface.h.
 *
 * For all inline functions prototyped here, the implementation is expected
 * to be provided by arch/ARCH/include/kernel_arch_func.h
 */
#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_

#include <kernel.h>
#include <sys/arch_interface.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @defgroup arch-timing Architecture timing APIs
 * @ingroup arch-interface
 * @{
 */
#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
/**
 * Architecture-specific implementation of busy-waiting
 *
 * @param usec_to_wait Wait period, in microseconds
 */
void arch_busy_wait(uint32_t usec_to_wait);
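
/*
 * A minimal sketch, for illustration only, of one way a custom busy-wait
 * can be built on a cycle counter. k_cycle_get_32() and
 * sys_clock_hw_cycles_per_sec() are the generic kernel interfaces; a real
 * port would typically read a dedicated timestamp register directly.
 *
 * void arch_busy_wait(uint32_t usec_to_wait)
 * {
 *	uint32_t start = k_cycle_get_32();
 *	uint32_t cycles = (uint32_t)(((uint64_t)usec_to_wait *
 *				      sys_clock_hw_cycles_per_sec()) /
 *				     USEC_PER_SEC);
 *
 *	while ((k_cycle_get_32() - start) < cycles) {
 *	}
 * }
 */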
#endif

/** @} */

/**
 * @defgroup arch-threads Architecture thread APIs
 * @ingroup arch-interface
 * @{
 */

/** Handle arch-specific logic for setting up new threads
 *
 * The stack and arch-specific thread state variables must be set up
 * such that a later attempt to switch to this thread will succeed
 * and we will enter z_thread_entry with the requested thread and
 * arguments as its parameters.
 *
 * At some point in this function's implementation, z_setup_new_thread() must
 * be called with the true bounds of the available stack buffer within the
 * thread's stack object.
 *
 * The provided stack pointer is guaranteed to be properly aligned with respect
 * to the CPU and ABI requirements. There may be space reserved between the
 * stack pointer and the bounds of the stack buffer for initial stack pointer
 * randomization and thread-local storage.
 *
 * Fields in thread->base will be initialized when this is called.
 *
 * @param thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack object
 * @param stack_ptr Aligned initial stack pointer
 * @param entry Thread entry function
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3);
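
/*
 * Illustrative sketch only, in the spirit of the arch_switch() pseudocode
 * below: a CONFIG_USE_SWITCH port typically carves an initial frame out of
 * the stack and publishes it through the thread's switch handle. The
 * init_stack_frame layout and the callee_saved.sp field are hypothetical,
 * and the bookkeeping described above is omitted:
 *
 * void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 *			char *stack_ptr, k_thread_entry_t entry,
 *			void *p1, void *p2, void *p3)
 * {
 *	struct init_stack_frame *iframe;
 *
 *	iframe = Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);
 *	iframe->entry = entry;
 *	iframe->arg1 = p1;
 *	iframe->arg2 = p2;
 *	iframe->arg3 = p3;
 *	thread->callee_saved.sp = (uintptr_t)iframe;
 *	thread->switch_handle = thread;
 * }
 */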

#ifdef CONFIG_USE_SWITCH
/** Cooperative context switch primitive
 *
 * The action of arch_switch() should be to switch to a new context
 * passed in the first argument, and save a pointer to the current
 * context into the address passed in the second argument.
 *
 * The actual type and interpretation of the switch handle is specified
 * by the architecture. It is the same data structure stored in the
 * "switch_handle" field of a newly-created thread in arch_new_thread(),
 * and passed to the kernel as the "interrupted" argument to
 * z_get_next_switch_handle().
 *
 * Note that on SMP systems, the kernel uses the store through the
 * second pointer as a synchronization point to detect when a thread
 * context is completely saved (so another CPU can know when it is
 * safe to switch). This store must be done AFTER all relevant state
 * is saved, and must include whatever memory barriers or cache
 * management code is required to be sure another CPU will see the
 * result correctly.
 *
 * The simplest implementation of arch_switch() is generally to push
 * state onto the thread stack and use the resulting stack pointer as the
 * switch handle. Some architectures may instead decide to use a pointer
 * into the thread struct as the "switch handle" type. These can legally
 * assume that the second argument to arch_switch() is the address of the
 * switch_handle field of struct thread_base and can use an offset on
 * this value to find other parts of the thread struct. For example a (C
 * pseudocode) implementation of arch_switch() might look like:
 *
 * void arch_switch(void *switch_to, void **switched_from)
 * {
 *	struct k_thread *new = switch_to;
 *	struct k_thread *old = CONTAINER_OF(switched_from, struct k_thread,
 *					    switch_handle);
 *
 *	// save old context...
 *	*switched_from = old;
 *	// restore new context...
 * }
 *
 * Note that the kernel manages the switch_handle field for
 * synchronization as described above. So it is not legal for
 * architecture code to assume that it has any particular value at any
 * other time. In particular it is not legal to read the field from the
 * address passed in the second argument.
 *
 * @param switch_to Incoming thread's switch handle
 * @param switched_from Pointer to outgoing thread's switch handle storage
 *        location, which must be updated.
 */
static inline void arch_switch(void *switch_to, void **switched_from);
Stephanos Ioannidis2d746042019-10-25 00:08:21 +0900132#else
133/**
134 * Cooperatively context switch
135 *
136 * Must be called with interrupts locked with the provided key.
137 * This is the older-style context switching method, which is incompatible
138 * with SMP. New arch ports, either SMP or UP, are encouraged to implement
Andrew Boie4f77c2a2019-11-07 12:43:29 -0800139 * arch_switch() instead.
Stephanos Ioannidis2d746042019-10-25 00:08:21 +0900140 *
141 * @param key Interrupt locking key
142 * @return If woken from blocking on some kernel object, the result of that
143 * blocking operation.
144 */
Andrew Boie4f77c2a2019-11-07 12:43:29 -0800145int arch_swap(unsigned int key);
Stephanos Ioannidis2d746042019-10-25 00:08:21 +0900146
147/**
148 * Set the return value for the specified thread.
149 *
150 * It is assumed that the specified @a thread is pending.
151 *
152 * @param thread Pointer to thread object
153 * @param value value to set as return value
154 */
155static ALWAYS_INLINE void
Andrew Boie4f77c2a2019-11-07 12:43:29 -0800156arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
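
/*
 * For illustration: a common pattern is to stash the value in an
 * arch-specific thread field which arch_swap() then returns when the
 * thread is eventually switched back in (the field name here is
 * hypothetical):
 *
 * static ALWAYS_INLINE void
 * arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 * {
 *	thread->arch.swap_return_value = value;
 * }
 */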
#endif /* CONFIG_USE_SWITCH */

#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/**
 * Custom logic for entering main thread context at early boot
 *
 * Used by architectures where the typical trick of setting up a dummy thread
 * in early boot context to "switch out" of isn't workable.
 *
 * @param main_thread main thread object
 * @param stack_ptr Initial stack pointer
 * @param _main Entry point for application main function.
 */
void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
				k_thread_entry_t _main);
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/**
 * @brief Disable floating point context preservation
 *
 * The function is used to disable the preservation of floating
 * point context information for a particular thread.
 *
 * @note For ARM architecture, disabling floating point preservation may only
 * be requested for the current thread and cannot be requested in ISRs.
 *
 * @retval 0 On success.
 * @retval -EINVAL If the floating point disabling could not be performed.
 */
int arch_float_disable(struct k_thread *thread);

/**
 * @brief Enable floating point context preservation
 *
 * The function is used to enable the preservation of floating
 * point context information for a particular thread.
 * This API depends on each architecture's implementation. If the
 * architecture does not support enabling it, this API will always fail.
 *
 * The @a options parameter indicates which floating point register sets will
 * be used by the specified thread. Currently it is used by x86 only.
 *
 * @param thread ID of thread.
 * @param options architecture dependent options
 *
 * @retval 0 On success.
 * @retval -EINVAL If the floating point enabling could not be performed.
 */
int arch_float_enable(struct k_thread *thread, unsigned int options);
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

/** @} */

/**
 * @defgroup arch-pm Architecture-specific power management APIs
 * @ingroup arch-interface
 * @{
 */
/** Halt the system, optionally propagating a reason code */
FUNC_NORETURN void arch_system_halt(unsigned int reason);

/** @} */


/**
 * @defgroup arch-irq Architecture-specific IRQ APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Test if the current context is in interrupt context
 *
 * XXX: This is inconsistently handled among arches wrt exception context
 * See: #17656
 *
 * @return true if we are in interrupt context
 */
static inline bool arch_is_in_isr(void);

/** @} */

/**
 * @defgroup arch-mmu Architecture-specific memory-mapping APIs
 * @ingroup arch-interface
 * @{
 */

#ifdef CONFIG_MMU
/**
 * Map physical memory into the virtual address space
 *
 * This is a low-level interface to mapping pages into the address space.
 * Behavior when providing unaligned addresses/sizes is undefined; these
 * are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
 *
 * The core kernel handles all management of the virtual address space;
 * by the time we invoke this function, we know exactly where this mapping
 * will be established. If the page tables already had mappings installed
 * for the virtual memory region, these will be overwritten.
 *
 * If the target architecture supports multiple page sizes, currently
 * only the smallest page size will be used.
 *
 * The memory range itself is never accessed by this operation.
 *
 * This API must be safe to call in ISRs or exception handlers. Calls
 * to this API are assumed to be serialized, and indeed all usage will
 * originate from kernel/mm.c which handles virtual memory management.
 *
 * Architectures are expected to pre-allocate page tables for the entire
 * address space, as defined by CONFIG_KERNEL_VM_BASE and
 * CONFIG_KERNEL_VM_SIZE. This operation should never require any kind of
 * allocation for paging structures.
 *
 * Validation of arguments should be done via assertions.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt Page-aligned destination virtual address to map
 * @param phys Page-aligned source physical address to map
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param flags Caching, access and control flags, see K_MAP_* macros
 */
void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags);
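
/*
 * A sketch of the shape most implementations take, assuming hypothetical
 * helpers set_pte() (installs one entry in every page table) and
 * tlb_flush_page(); only the smallest page size is used:
 *
 * void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 * {
 *	__ASSERT((uintptr_t)virt % CONFIG_MMU_PAGE_SIZE == 0U, "unaligned");
 *
 *	for (size_t off = 0; off < size; off += CONFIG_MMU_PAGE_SIZE) {
 *		set_pte((uint8_t *)virt + off, phys + off, flags);
 *		tlb_flush_page((uint8_t *)virt + off);
 *	}
 * }
 */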

/**
 * Remove mappings for a provided virtual address range
 *
 * This is a low-level interface for un-mapping pages from the address space.
 * When this completes, the relevant page table entries will be updated as
 * if no mapping was ever made for that memory range. No previous context
 * needs to be preserved. This function must update mappings in all active
 * page tables.
 *
 * Behavior when providing unaligned addresses/sizes is undefined; these
 * are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
 *
 * Behavior when providing an address range that is not already mapped is
 * undefined.
 *
 * This function should never require memory allocations for paging structures,
 * and it is not necessary to free any paging structures. Empty page tables
 * due to all contained entries being un-mapped may remain in place.
 *
 * Implementations must invalidate TLBs as necessary.
 *
 * This API is part of infrastructure still under development and may change.
 *
 * @param addr Page-aligned base virtual address to un-map
 * @param size Page-aligned region size
 */
void arch_mem_unmap(void *addr, size_t size);

/**
 * Get the mapped physical memory address from a virtual address.
 *
 * The function only needs to query the current set of page tables as
 * the information it reports must be common to all of them if multiple
 * page tables are in use. If multiple page tables are active it is unnecessary
 * to iterate over all of them.
 *
 * Unless otherwise specified, virtual pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior.
 * Just check the currently installed page tables and return the information
 * found there.
 *
 * @param virt Page-aligned virtual address
 * @param[out] phys Mapped physical address (can be NULL if only checking
 *                  if virtual address is mapped)
 *
 * @retval 0 if mapping is found and valid
 * @retval -EFAULT if virtual address is not mapped
 */
int arch_page_phys_get(void *virt, uintptr_t *phys);
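
/*
 * Illustrative use: probing whether a virtual address is currently backed
 * by physical memory before touching it:
 *
 *	uintptr_t phys;
 *
 *	if (arch_page_phys_get(addr, &phys) == 0) {
 *		// addr is mapped at physical address phys
 *	}
 */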

#ifdef CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES
/**
 * Update page frame database with reserved pages
 *
 * Some page frames within system RAM may not be available for use. A good
 * example of this is reserved regions in the first megabyte on PC-like systems.
 *
 * Implementations of this function should mark all relevant entries in
 * z_page_frames with K_PAGE_FRAME_RESERVED. This function is called at
 * early system initialization with mm_lock held.
 */
void arch_reserved_pages_update(void);
#endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */

#ifdef CONFIG_DEMAND_PAGING
/**
 * Update all page tables for a paged-out data page
 *
 * This function:
 * - Sets the data page virtual address to trigger a fault if accessed, in a
 *   way that can be distinguished from access violations or un-mapped pages.
 * - Saves the provided location value so that it can be retrieved for that
 *   data page in the page fault handler.
 * - The location value semantics are undefined here, but the value will
 *   always be page-aligned. It could be 0.
 *
 * If multiple page tables are in use, this must update all page tables.
 * This function is called with interrupts locked.
 *
 * Calling this function on data pages which are already paged out is
 * undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_page_out(void *addr, uintptr_t location);

/**
 * Update all page tables for a paged-in data page
 *
 * This function:
 * - Maps the specified virtual data page address to the provided physical
 *   page frame address, such that future memory accesses will function as
 *   expected. Access and caching attributes are undisturbed.
 * - Clears any accounting for "accessed" and "dirty" states.
 *
 * If multiple page tables are in use, this must update all page tables.
 * This function is called with interrupts locked.
 *
 * Calling this function on data pages which are already paged in is
 * undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_page_in(void *addr, uintptr_t phys);

/**
 * Update current page tables for a temporary mapping
 *
 * Map a physical page frame address to a special virtual address
 * Z_SCRATCH_PAGE, with read/write access to supervisor mode, such that
 * when this function returns, the calling context can read/write the page
 * frame's contents from the Z_SCRATCH_PAGE address.
 *
 * This mapping only needs to be done on the current set of page tables,
 * as it is only used for a short period of time exclusively by the caller.
 * This function is called with interrupts locked.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_scratch(uintptr_t phys);

enum arch_page_location {
	ARCH_PAGE_LOCATION_PAGED_OUT,
	ARCH_PAGE_LOCATION_PAGED_IN,
	ARCH_PAGE_LOCATION_BAD
};

/**
 * Fetch location information about a page at a particular address
 *
 * The function only needs to query the current set of page tables as
 * the information it reports must be common to all of them if multiple
 * page tables are in use. If multiple page tables are active it is unnecessary
 * to iterate over all of them. This may allow certain types of optimizations
 * (such as reverse page table mapping on x86).
 *
 * This function is called with interrupts locked, so that the reported
 * information can't become stale while decisions are being made based on it.
 *
 * Unless otherwise specified, virtual data pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior.
 * Just check the currently installed page tables and return the information
 * found there.
 *
 * @param addr Virtual data page address that took the page fault
 * @param [out] location In the case of ARCH_PAGE_LOCATION_PAGED_OUT, the
 *        backing store location value used to retrieve the data page. In the
 *        case of ARCH_PAGE_LOCATION_PAGED_IN, the physical address the page
 *        is mapped to.
 * @retval ARCH_PAGE_LOCATION_PAGED_OUT The page was evicted to the backing
 *         store.
 * @retval ARCH_PAGE_LOCATION_PAGED_IN The data page is resident in memory.
 * @retval ARCH_PAGE_LOCATION_BAD The page is un-mapped or otherwise has had
 *         invalid access
 */
enum arch_page_location arch_page_location_get(void *addr, uintptr_t *location);

/**
 * @def ARCH_DATA_PAGE_ACCESSED
 *
 * Bit indicating the data page was accessed since the value was last cleared.
 *
 * Used by marking eviction algorithms. Safe to set this if uncertain.
 *
 * This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
 */

/**
 * @def ARCH_DATA_PAGE_DIRTY
 *
 * Bit indicating the data page, if evicted, will need to be paged out.
 *
 * Set if the data page was modified since it was last paged out, or if
 * it has never been paged out before. Safe to set this if uncertain.
 *
 * This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
 */

/**
 * @def ARCH_DATA_PAGE_LOADED
 *
 * Bit indicating that the data page is loaded into a physical page frame.
 *
 * If un-set, the data page is paged out or not mapped.
 */

/**
 * @def ARCH_DATA_PAGE_NOT_MAPPED
 *
 * If ARCH_DATA_PAGE_LOADED is un-set, this will indicate that the page
 * is not mapped at all. This bit is undefined if ARCH_DATA_PAGE_LOADED is set.
 */

/**
 * Retrieve page characteristics from the page table(s)
 *
 * The architecture is responsible for maintaining "accessed" and "dirty"
 * states of data pages to support marking eviction algorithms. This can
 * either be directly supported by hardware or emulated by modifying
 * protection policy to generate faults on reads or writes. In all cases
 * the architecture must maintain this information in some way.
 *
 * For the provided virtual address, report the logical OR of the accessed
 * and dirty states for the relevant entries in all active page tables in
 * the system if the page is mapped and not paged out.
 *
 * If clear_accessed is true, the ARCH_DATA_PAGE_ACCESSED flag will be reset.
 * This function will report its prior state. If multiple page tables are in
 * use, this function clears accessed state in all of them.
 *
 * This function is called with interrupts locked, so that the reported
 * information can't become stale while decisions are being made based on it.
 *
 * The return value may have other bits set which the caller must ignore.
 *
 * Clearing accessed state for data pages that are not ARCH_DATA_PAGE_LOADED
 * is undefined behavior.
 *
 * ARCH_DATA_PAGE_DIRTY and ARCH_DATA_PAGE_ACCESSED bits in the return value
 * are only significant if ARCH_DATA_PAGE_LOADED is set, otherwise ignore
 * them.
 *
 * ARCH_DATA_PAGE_NOT_MAPPED bit in the return value is only significant
 * if ARCH_DATA_PAGE_LOADED is un-set, otherwise ignore it.
 *
 * Unless otherwise specified, virtual data pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 *
 * @param addr Virtual address to look up in page tables
 * @param [out] location If non-NULL, updated with either physical page frame
 *                       address or backing store location depending on
 *                       ARCH_DATA_PAGE_LOADED state. This is not touched if
 *                       ARCH_DATA_PAGE_NOT_MAPPED.
 * @param clear_accessed Whether to clear ARCH_DATA_PAGE_ACCESSED state
 * @retval Value with ARCH_DATA_PAGE_* bits set reflecting the data page
 *         configuration
 */
uintptr_t arch_page_info_get(void *addr, uintptr_t *location,
			     bool clear_accessed);
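
/*
 * Illustrative only: how a simple clock/second-chance eviction pass might
 * consume this API (evict_candidate() is hypothetical):
 *
 *	uintptr_t flags = arch_page_info_get(vaddr, NULL, true);
 *
 *	if ((flags & ARCH_DATA_PAGE_LOADED) != 0U &&
 *	    (flags & ARCH_DATA_PAGE_ACCESSED) == 0U) {
 *		// not accessed since the last pass; a good eviction candidate
 *		evict_candidate(vaddr);
 *	}
 */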
#endif /* CONFIG_DEMAND_PAGING */
#endif /* CONFIG_MMU */
/** @} */

/**
 * @defgroup arch-misc Miscellaneous architecture APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Early boot console output hook
 *
 * Definition of this function is optional. If implemented, any invocation
 * of printk() (or logging calls with CONFIG_LOG_MINIMAL which are backed by
 * printk) will default to sending characters to this function. It is
 * useful for early boot debugging before main serial or console drivers
 * come up.
 *
 * This can be overridden at runtime with __printk_hook_install().
 *
 * The default __weak implementation of this does nothing.
 *
 * @param c Character to print
 * @return The character printed
 */
int arch_printk_char_out(int c);
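
/*
 * A minimal sketch of an override, assuming a hypothetical polled UART
 * interface (uart_tx_ready() and uart_tx_write() are not real APIs):
 *
 * int arch_printk_char_out(int c)
 * {
 *	while (!uart_tx_ready()) {
 *	}
 *	uart_tx_write((unsigned char)c);
 *
 *	return c;
 * }
 */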

/**
 * Architecture-specific kernel initialization hook
 *
 * This function is invoked near the top of _Cstart, for additional
 * architecture-specific setup before the rest of the kernel is brought up.
 *
 * TODO: Deprecate, most arches are using a prep_c() function to do the same
 * thing in a simpler way
 */
static inline void arch_kernel_init(void);

/** Do nothing and return. Yawn. */
static inline void arch_nop(void);

/** @} */

/**
 * @defgroup arch-coredump Architecture-specific core dump APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * @brief Architecture-specific handling during coredump
 *
 * This dumps architecture-specific information during coredump.
 *
 * @param esf Exception Stack Frame (arch-specific)
 */
void arch_coredump_info_dump(const z_arch_esf_t *esf);

/**
 * @brief Get the target code specified by the architecture.
 */
uint16_t arch_coredump_tgt_code_get(void);

/** @} */

/**
 * @defgroup arch-tls Architecture-specific Thread Local Storage APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * @brief Setup Architecture-specific TLS area in stack
 *
 * This sets up the stack area for thread local storage.
 * The structure inside the area is architecture-specific.
 *
 * @param new_thread New thread object
 * @param stack_ptr Stack pointer
 * @return Number of bytes taken by the TLS area
 */
size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr);
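
/*
 * Illustrative shape only; z_tls_data_size() and z_tls_copy() stand in for
 * whatever mechanism the port uses to size and initialize the TLS image,
 * and the thread->tls field is assumed for this sketch:
 *
 * size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)
 * {
 *	size_t tls_size = z_tls_data_size();
 *
 *	stack_ptr -= tls_size;
 *	z_tls_copy(stack_ptr);
 *	new_thread->tls = (uintptr_t)stack_ptr;
 *
 *	return tls_size;
 * }
 */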

/** @} */

/* Include arch-specific inline function implementation */
#include <kernel_arch_func.h>

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_ */