/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * The purpose of this file is to provide essential/minimal kernel structure
 * definitions, so that they can be used without including kernel.h.
 *
 * The following rules must be observed:
 * 1. kernel_structs.h shall not depend on kernel.h, either directly or
 *    indirectly (i.e. it shall not include any header files that include
 *    kernel.h in their dependency chain).
 * 2. kernel.h shall imply kernel_structs.h, such that it shall not be
 *    necessary to include kernel_structs.h explicitly when kernel.h is
 *    included.
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_

#if !defined(_ASMLANGUAGE)
#include <sys/atomic.h>
#include <zephyr/types.h>
#include <kernel/sched_priq.h>
#include <sys/dlist.h>
#include <sys/util.h>
#include <sys/sys_heap.h>
#include <arch/structs.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#define K_NUM_PRIORITIES \
	(CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES + 1)

#define K_NUM_PRIO_BITMAPS ((K_NUM_PRIORITIES + 31) >> 5)
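
/*
 * Worked example (illustrative, assuming the default Kconfig values of
 * CONFIG_NUM_COOP_PRIORITIES=16 and CONFIG_NUM_PREEMPT_PRIORITIES=15; the
 * extra +1 covers the lowest, idle-level priority): K_NUM_PRIORITIES is
 * 16 + 15 + 1 = 32, so K_NUM_PRIO_BITMAPS is (32 + 31) >> 5 = 1, i.e. a
 * single 32-bit word holds one bit per priority level.
 */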

/*
 * Bitmask definitions for the struct k_thread.thread_state field.
 *
 * Must be before kernel_arch_data.h because it might need them to be already
 * defined.
 */

/* states: common uses low bits, arch-specific uses high bits */

/* Not a real thread */
#define _THREAD_DUMMY (BIT(0))

/* Thread is waiting on an object */
#define _THREAD_PENDING (BIT(1))

/* Thread has not yet started */
#define _THREAD_PRESTART (BIT(2))

/* Thread has terminated */
#define _THREAD_DEAD (BIT(3))

/* Thread is suspended */
#define _THREAD_SUSPENDED (BIT(4))

/* Thread is being aborted */
#define _THREAD_ABORTING (BIT(5))

/* Thread is present in the ready queue */
#define _THREAD_QUEUED (BIT(7))

/* end - states */
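
/*
 * Illustrative note: thread_state is a plain bitmask, so more than one of
 * the bits above may be set at a time and callers test them individually,
 * e.g. (thread->base.thread_state & _THREAD_DEAD) != 0 checks whether a
 * thread has terminated. The k_thread/base layout referenced here is
 * defined outside this header and only assumed for the example.
 */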

#ifdef CONFIG_STACK_SENTINEL
/* Magic value in lowest bytes of the stack */
#define STACK_SENTINEL 0xF0F0F0F0
#endif

/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
#define _NON_PREEMPT_THRESHOLD 0x0080U

/* highest value of _thread_base.preempt at which a thread is preemptible */
#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1U)
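
/*
 * How these thresholds are intended to be read (a sketch; the layout of
 * _thread_base.preempt is defined elsewhere and only assumed here): the
 * field overlays a signed priority in its low byte and a scheduler-lock
 * count in its high byte, so either a cooperative (negative) priority or a
 * non-zero lock count pushes the combined value to 0x0080 or above, i.e.
 * past _PREEMPT_THRESHOLD, making the thread non-preemptible.
 */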

#if !defined(_ASMLANGUAGE)

struct _ready_q {
#ifndef CONFIG_SMP
	/* always contains next thread to run: cannot be NULL */
	struct k_thread *cache;
#endif

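	/*
	 * Run queue backend, chosen at build time (summary based on the
	 * field types used below): CONFIG_SCHED_DUMB keeps a single
	 * doubly-linked list, CONFIG_SCHED_SCALABLE keeps a balanced
	 * (red-black) tree, and CONFIG_SCHED_MULTIQ is assumed to keep one
	 * queue per priority level.
	 */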
#if defined(CONFIG_SCHED_DUMB)
	sys_dlist_t runq;
#elif defined(CONFIG_SCHED_SCALABLE)
	struct _priq_rb runq;
#elif defined(CONFIG_SCHED_MULTIQ)
	struct _priq_mq runq;
#endif
};

typedef struct _ready_q _ready_q_t;

struct _cpu {
	/* nested interrupt count */
	uint32_t nested;

	/* interrupt stack pointer base */
	char *irq_stack;

	/* currently scheduled thread */
	struct k_thread *current;

	/* one assigned idle thread per CPU */
	struct k_thread *idle_thread;

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	/* Coop thread preempted by current metairq, or NULL */
	struct k_thread *metairq_preempted;
#endif

#ifdef CONFIG_TIMESLICING
	/* number of ticks remaining in current time slice */
	int slice_ticks;
#endif

	uint8_t id;

#ifdef CONFIG_SMP
	/* True when _current is allowed to context switch */
	uint8_t swap_ok;
#endif

	/* Per CPU architecture specifics */
	struct _cpu_arch arch;
};

typedef struct _cpu _cpu_t;

struct z_kernel {
	struct _cpu cpus[CONFIG_MP_NUM_CPUS];

#ifdef CONFIG_SYS_CLOCK_EXISTS
	/* queue of timeouts */
	sys_dlist_t timeout_q;
#endif

#ifdef CONFIG_PM
	int32_t idle; /* Number of ticks for kernel idling */
#endif

	/*
	 * ready queue: can be big, keep after small fields, since some
	 * assembly (e.g. ARC) is limited in the encoding of the offset
Benjamin Walshf6ca7de2016-11-08 10:36:50 -0500155 */
156 struct _ready_q ready_q;
157
Stephanos Ioannidisaaf93202020-05-03 18:03:19 +0900158#ifdef CONFIG_FPU_SHARING
Benjamin Walshf6ca7de2016-11-08 10:36:50 -0500159 /*
160 * A 'current_sse' field does not exist in addition to the 'current_fp'
161 * field since it's not possible to divide the IA-32 non-integer
162 * registers into 2 distinct blocks owned by differing threads. In
	 * other words, given that the 'fxsave/fxrstor' instructions
	 * save/restore both the X87 FPU and XMM registers, it's not possible
	 * for a thread to only "own" the XMM registers.
	 */

	/* thread that owns the FP regs */
	struct k_thread *current_fp;
#endif

#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *threads; /* singly linked list of ALL threads */
#endif
};

typedef struct z_kernel _kernel_t;

extern struct z_kernel _kernel;

#ifdef CONFIG_SMP

/* True if the current context can be preempted and migrated to
 * another SMP CPU.
 */
bool z_smp_cpu_mobile(void);

#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
			arch_curr_cpu(); })
#define _current z_current_get()

#else
#define _current_cpu (&_kernel.cpus[0])
#define _current _kernel.cpus[0].current
#endif
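
/*
 * Usage note (illustrative): _current names the thread running on this CPU
 * and _current_cpu its per-CPU record, e.g. _current_cpu->nested != 0 is
 * taken to mean this CPU is servicing an interrupt. On SMP, _current_cpu is
 * only valid while the context cannot migrate to another CPU, which is what
 * the z_smp_cpu_mobile() assertion above enforces.
 */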

#define _timeout_q _kernel.timeout_q

/* kernel wait queue record */

#ifdef CONFIG_WAITQ_SCALABLE

typedef struct {
	struct _priq_rb waitq;
} _wait_q_t;

extern bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }

#else

typedef struct {
	sys_dlist_t waitq;
} _wait_q_t;

#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }

#endif
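
/*
 * Illustrative use of Z_WAIT_Q_INIT (the object name and field are made up
 * for the example): a kernel object embedding a wait queue initializes it
 * by passing the address of its own waitq field, e.g.
 *
 *	struct some_kobj obj = { .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) };
 *
 * In the scalable build the argument is unused and the rb-tree comparison
 * hook is wired up instead; in the dlist build it seeds the static list
 * initializer.
 */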

/* kernel timeout record */

struct _timeout;
typedef void (*_timeout_func_t)(struct _timeout *t);

struct _timeout {
	sys_dnode_t node;
	_timeout_func_t fn;
#ifdef CONFIG_TIMEOUT_64BIT
	/* Can't use k_ticks_t for header dependency reasons */
	int64_t dticks;
#else
	int32_t dticks;
#endif
};
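
/*
 * Sketch of how a timeout is consumed (handler and object names are
 * hypothetical):
 *
 *	static void my_expiry(struct _timeout *t)
 *	{
 *		struct my_obj *obj = CONTAINER_OF(t, struct my_obj, timeout);
 *		...
 *	}
 *
 * The kernel invokes fn with a pointer to the expiring record, so users
 * typically recover their enclosing object with CONTAINER_OF; dticks holds
 * the expiry in ticks and is managed entirely by the timeout code.
 */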

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */