/*
 * Copyright (c) 2018, 2024 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <kswap.h>
#include <ksched.h>
#include <ipi.h>

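/* Global slicing state: the default slice length in ticks (converted
 * at build time from CONFIG_TIMESLICE_SIZE milliseconds), the highest
 * priority (numerically lowest value) still subject to slicing, and
 * one timeout plus one expiration flag per CPU.
 */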
static int slice_ticks = DIV_ROUND_UP(CONFIG_TIMESLICE_SIZE * Z_HZ_ticks, Z_HZ_ms);
static int slice_max_prio = CONFIG_TIMESLICE_PRIORITY;
static struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];

#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
 * to try to timeslice away _current after it has already pended
 * itself but before the corresponding context switch. Treat that as
 * a noop condition in z_time_slice().
 */
struct k_thread *pending_current;
#endif /* CONFIG_SWAP_NONATOMIC */

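/* Return the slice length, in ticks, to apply to @p thread: the
 * per-thread slice when CONFIG_TIMESLICE_PER_THREAD is enabled and the
 * thread has one set, otherwise the global default.
 */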
static inline int slice_time(struct k_thread *thread)
{
	int ret = slice_ticks;

#ifdef CONFIG_TIMESLICE_PER_THREAD
	if (thread->base.slice_ticks != 0) {
		ret = thread->base.slice_ticks;
	}
#else
	ARG_UNUSED(thread);
#endif
	return ret;
}

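/* A thread is sliceable when it is preemptible, has a nonzero slice
 * length, is not above the priority ceiling for slicing, and is
 * neither blocked nor the idle thread. With CONFIG_TIMESLICE_PER_THREAD,
 * any thread with a per-thread slice set is treated as sliceable as well.
 */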
bool thread_is_sliceable(struct k_thread *thread)
{
	bool ret = thread_is_preemptible(thread)
		&& slice_time(thread) != 0
		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
		&& !z_is_thread_prevented_from_running(thread)
		&& !z_is_idle_thread_object(thread);

#ifdef CONFIG_TIMESLICE_PER_THREAD
	ret |= thread->base.slice_ticks != 0;
#endif

	return ret;
}

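/* Per-CPU slice timer expiry handler: record which CPU's slice ran
 * out; z_time_slice() acts on the flag from that CPU's next timer
 * interrupt.
 */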
static void slice_timeout(struct _timeout *timeout)
{
	int cpu = ARRAY_INDEX(slice_timeouts, timeout);

	slice_expired[cpu] = true;

	/* We need an IPI if we just handled a timeslice expiration
	 * for a different CPU.
	 */
	if (cpu != _current_cpu->id) {
		flag_ipi(IPI_CPU_MASK(cpu));
	}
}

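/* (Re)arm the current CPU's slice timer for @p thread, aborting any
 * slice already in flight. The timeout is registered one tick short
 * of the slice length; the remaining tick is assumed to be the one
 * during which the slice is being reset.
 */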
void z_reset_time_slice(struct k_thread *thread)
{
	int cpu = _current_cpu->id;

	z_abort_timeout(&slice_timeouts[cpu]);
	slice_expired[cpu] = false;
	if (thread_is_sliceable(thread)) {
		z_add_timeout(&slice_timeouts[cpu], slice_timeout,
			      K_TICKS(slice_time(thread) - 1));
	}
}

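/* Public API: set the global time slice in milliseconds and the
 * priority ceiling for slicing, then restart the slice for the
 * current thread. As an illustrative example (not taken from this
 * file), k_sched_time_slice_set(5, 0) enables a 5 ms round-robin
 * slice for every preemptible thread.
 */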
void k_sched_time_slice_set(int32_t slice, int prio)
{
	K_SPINLOCK(&_sched_spinlock) {
		slice_ticks = k_ms_to_ticks_ceil32(slice);
		slice_max_prio = prio;
		z_reset_time_slice(_current);
	}
}

#ifdef CONFIG_TIMESLICE_PER_THREAD
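/* Public API: give @p thread its own slice length (in ticks) and an
 * optional callback run from z_time_slice() when that slice expires.
 * Hypothetical usage, for illustration only (on_expired and my_thread
 * are not defined in this file):
 *
 *	static void on_expired(struct k_thread *t, void *data)
 *	{
 *		printk("slice expired for thread %p\n", t);
 *	}
 *
 *	k_thread_time_slice_set(&my_thread, 100, on_expired, NULL);
 */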
void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks,
			     k_thread_timeslice_fn_t expired, void *data)
{
	K_SPINLOCK(&_sched_spinlock) {
		thread->base.slice_ticks = thread_slice_ticks;
		thread->base.slice_expired = expired;
		thread->base.slice_data = data;
		z_reset_time_slice(thread);
	}
}
#endif /* CONFIG_TIMESLICE_PER_THREAD */

/* Called out of each timer interrupt. If the current thread's slice
 * has expired, invoke its per-thread expiry callback if one is set
 * (with the scheduler lock released around the call), rotate the
 * thread to the back of its priority queue, and start a new slice.
 */
void z_time_slice(void)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
	struct k_thread *curr = _current;

#ifdef CONFIG_SWAP_NONATOMIC
	if (pending_current == curr) {
		z_reset_time_slice(curr);
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}
	pending_current = NULL;
#endif

	if (slice_expired[_current_cpu->id] && thread_is_sliceable(curr)) {
#ifdef CONFIG_TIMESLICE_PER_THREAD
		if (curr->base.slice_expired) {
			k_spin_unlock(&_sched_spinlock, key);
			curr->base.slice_expired(curr, curr->base.slice_data);
			key = k_spin_lock(&_sched_spinlock);
		}
#endif
		if (!z_is_thread_prevented_from_running(curr)) {
			move_thread_to_end_of_prio_q(curr);
		}
		z_reset_time_slice(curr);
	}
	k_spin_unlock(&_sched_spinlock, key);
}