blob: be91d9606f51e2d1a833a2601ddc0e29a0005105 [file] [log] [blame]
Anas Nashif37df4852024-03-08 07:51:01 -05001/*
2 * Copyright (c) 2018, 2024 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6#include <zephyr/kernel.h>
7#include <kswap.h>
8#include <ksched.h>
9#include <ipi.h>
10
/* Default slice length in ticks, derived from CONFIG_TIMESLICE_SIZE (ms) */
static int slice_ticks = DIV_ROUND_UP(CONFIG_TIMESLICE_SIZE * Z_HZ_ticks, Z_HZ_ms);
/* Priority ceiling: threads with priority higher than this are never
 * timesliced (checked via z_is_prio_higher() in thread_is_sliceable()).
 */
static int slice_max_prio = CONFIG_TIMESLICE_PRIORITY;
/* One timeout object per CPU, armed by z_reset_time_slice() */
static struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
/* Per-CPU flag set by slice_timeout() when that CPU's slice has elapsed */
static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
15
#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
 * to try to timeslice away _current after it has already pended
 * itself but before the corresponding context switch. Treat that as
 * a noop condition in z_time_slice().
 */
struct k_thread *pending_current;
#endif
24
25static inline int slice_time(struct k_thread *thread)
26{
27 int ret = slice_ticks;
28
29#ifdef CONFIG_TIMESLICE_PER_THREAD
30 if (thread->base.slice_ticks != 0) {
31 ret = thread->base.slice_ticks;
32 }
33#else
34 ARG_UNUSED(thread);
35#endif
36 return ret;
37}
38
Anas Nashif0b473ce2024-03-27 07:13:45 -040039bool thread_is_sliceable(struct k_thread *thread)
Anas Nashif37df4852024-03-08 07:51:01 -050040{
Anas Nashif5c170c72024-03-28 07:20:51 -040041 bool ret = thread_is_preemptible(thread)
Anas Nashif37df4852024-03-08 07:51:01 -050042 && slice_time(thread) != 0
43 && !z_is_prio_higher(thread->base.prio, slice_max_prio)
44 && !z_is_thread_prevented_from_running(thread)
45 && !z_is_idle_thread_object(thread);
46
47#ifdef CONFIG_TIMESLICE_PER_THREAD
48 ret |= thread->base.slice_ticks != 0;
49#endif
50
51 return ret;
52}
53
54static void slice_timeout(struct _timeout *timeout)
55{
56 int cpu = ARRAY_INDEX(slice_timeouts, timeout);
57
58 slice_expired[cpu] = true;
59
60 /* We need an IPI if we just handled a timeslice expiration
Peter Mitsisd8a4c8a2024-02-16 13:54:47 -050061 * for a different CPU.
Anas Nashif37df4852024-03-08 07:51:01 -050062 */
Peter Mitsisd8a4c8a2024-02-16 13:54:47 -050063 if (cpu != _current_cpu->id) {
64 flag_ipi(IPI_CPU_MASK(cpu));
Anas Nashif37df4852024-03-08 07:51:01 -050065 }
66}
67
68void z_reset_time_slice(struct k_thread *thread)
69{
70 int cpu = _current_cpu->id;
71
72 z_abort_timeout(&slice_timeouts[cpu]);
73 slice_expired[cpu] = false;
Anas Nashif0b473ce2024-03-27 07:13:45 -040074 if (thread_is_sliceable(thread)) {
Anas Nashif37df4852024-03-08 07:51:01 -050075 z_add_timeout(&slice_timeouts[cpu], slice_timeout,
76 K_TICKS(slice_time(thread) - 1));
77 }
78}
79
80void k_sched_time_slice_set(int32_t slice, int prio)
81{
82 K_SPINLOCK(&_sched_spinlock) {
83 slice_ticks = k_ms_to_ticks_ceil32(slice);
84 slice_max_prio = prio;
85 z_reset_time_slice(_current);
86 }
87}
88
#ifdef CONFIG_TIMESLICE_PER_THREAD
/* Configure a per-thread slice: length in ticks, an optional expiry
 * callback and its user data, then restart the slice so the new
 * settings take effect immediately.
 */
void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks,
			     k_thread_timeslice_fn_t expired, void *data)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	thread->base.slice_ticks = thread_slice_ticks;
	thread->base.slice_expired = expired;
	thread->base.slice_data = data;
	z_reset_time_slice(thread);

	k_spin_unlock(&_sched_spinlock, key);
}
#endif
101
/* Called out of each timer interrupt */
void z_time_slice(void)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
	struct k_thread *curr = _current;

#ifdef CONFIG_SWAP_NONATOMIC
	/* _current has already pended itself but hasn't context-switched
	 * yet (see pending_current above): just restart its slice and
	 * bail out rather than slicing it away mid-swap.
	 */
	if (pending_current == curr) {
		z_reset_time_slice(curr);
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}
	pending_current = NULL;
#endif

	if (slice_expired[_current_cpu->id] && thread_is_sliceable(curr)) {
#ifdef CONFIG_TIMESLICE_PER_THREAD
		if (curr->base.slice_expired) {
			/* Invoke the user expiry callback with the
			 * scheduler lock dropped, then re-acquire it.
			 * NOTE(review): state may change while unlocked,
			 * hence the re-check of runnability below.
			 */
			k_spin_unlock(&_sched_spinlock, key);
			curr->base.slice_expired(curr, curr->base.slice_data);
			key = k_spin_lock(&_sched_spinlock);
		}
#endif
		/* Round-robin: rotate the thread behind its priority
		 * peers, but only if it is still able to run.
		 */
		if (!z_is_thread_prevented_from_running(curr)) {
			move_thread_to_end_of_prio_q(curr);
		}
		z_reset_time_slice(curr);
	}
	k_spin_unlock(&_sched_spinlock, key);
}
131}