/**
 * Copyright (c) 2024 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <kswap.h>
#include <ksched.h>
#include <ipi.h>

#ifdef CONFIG_TRACE_SCHED_IPI
extern void z_trace_sched_ipi(void);
#endif

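/*
 * Flag the CPUs in <ipi_mask> as needing a scheduler IPI by OR-ing the mask
 * into _kernel.pending_ipi. This only records the request; the IPI itself
 * is sent later by signal_pending_ipi().
 */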
void flag_ipi(uint32_t ipi_mask)
{
#if defined(CONFIG_SCHED_IPI_SUPPORTED)
	if (arch_num_cpus() > 1) {
		atomic_or(&_kernel.pending_ipi, (atomic_val_t)ipi_mask);
	}
#endif /* CONFIG_SCHED_IPI_SUPPORTED */
}

/* Create a bitmask of CPUs that need an IPI. Note: sched_spinlock is held. */
atomic_val_t ipi_mask_create(struct k_thread *thread)
{
	if (!IS_ENABLED(CONFIG_IPI_OPTIMIZE)) {
		return (CONFIG_MP_MAX_NUM_CPUS > 1) ? IPI_ALL_CPUS_MASK : 0;
	}

	uint32_t ipi_mask = 0;
	uint32_t num_cpus = (uint32_t)arch_num_cpus();
	uint32_t id = _current_cpu->id;
	struct k_thread *cpu_thread;
	bool executable_on_cpu = true;

	for (uint32_t i = 0; i < num_cpus; i++) {
		if (id == i) {
			continue;
		}

		/*
		 * An IPI absolutely does not need to be sent if ...
		 * 1. the CPU is not active, or
		 * 2. <thread> can not execute on the target CPU
		 * ... and might not need to be sent if ...
		 * 3. the target CPU's active thread is not preemptible, or
		 * 4. the target CPU's active thread has a higher priority
		 * (Items 3 & 4 may be overridden by a metaIRQ thread)
		 */

#if defined(CONFIG_SCHED_CPU_MASK)
		executable_on_cpu = ((thread->base.cpu_mask & BIT(i)) != 0);
#endif

		cpu_thread = _kernel.cpus[i].current;
		if ((cpu_thread != NULL) &&
		    (((z_sched_prio_cmp(cpu_thread, thread) < 0) &&
		      (thread_is_preemptible(cpu_thread))) ||
		     thread_is_metairq(thread)) && executable_on_cpu) {
			ipi_mask |= BIT(i);
		}
	}

	return (atomic_val_t)ipi_mask;
}

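/*
 * Atomically fetch and clear _kernel.pending_ipi and, if any CPUs were
 * flagged, send them a scheduler IPI: directed when the architecture
 * supports it, broadcast otherwise.
 *
 * Illustrative sketch (not a caller in this file): a scheduling point that
 * has just readied <thread> would typically pair the helpers above as
 *
 *	flag_ipi(ipi_mask_create(thread));
 *	...
 *	signal_pending_ipi();
 *
 * so that only the CPUs that may need to reschedule are interrupted.
 */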
void signal_pending_ipi(void)
{
	/* Synchronization note: you might think we need to lock these
	 * two steps, but an IPI is idempotent. It's OK if we do it
	 * twice. All we require is that if a CPU sees the flag true,
	 * it is guaranteed to send the IPI, and if a core sets
	 * pending_ipi, the IPI will be sent the next time through
	 * this code.
	 */
#if defined(CONFIG_SCHED_IPI_SUPPORTED)
	if (arch_num_cpus() > 1) {
		uint32_t cpu_bitmap;

		cpu_bitmap = (uint32_t)atomic_clear(&_kernel.pending_ipi);
		if (cpu_bitmap != 0) {
#ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
			arch_sched_directed_ipi(cpu_bitmap);
#else
			arch_sched_broadcast_ipi();
#endif
		}
	}
#endif /* CONFIG_SCHED_IPI_SUPPORTED */
}

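/*
 * Handler invoked on a CPU that receives a scheduler IPI: it hooks into IPI
 * tracing when enabled and, with CONFIG_TIMESLICING, runs the time-slice
 * logic for the interrupted thread if it is sliceable.
 */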
void z_sched_ipi(void)
{
	/* NOTE: When adding code to this, make sure this is called
	 * at an appropriate location when !CONFIG_SCHED_IPI_SUPPORTED.
	 */
#ifdef CONFIG_TRACE_SCHED_IPI
	z_trace_sched_ipi();
#endif /* CONFIG_TRACE_SCHED_IPI */

#ifdef CONFIG_TIMESLICING
	if (thread_is_sliceable(_current)) {
		z_time_slice();
	}
#endif /* CONFIG_TIMESLICING */
}