/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <sys/atomic.h>
#include <sys/__assert.h>
#include <stdbool.h>
#include <arch/cpu.h>

#ifdef __cplusplus
extern "C" {
#endif

struct z_spinlock_key {
	int key;
};

/**
 * @brief Kernel Spin Lock
 *
 * This struct defines a spin lock record on which CPUs can wait with
 * k_spin_lock(). Any number of spinlocks may be defined in
 * application code.
 */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock, with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#endif

#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If neither CONFIG_SMP nor CONFIG_SPIN_VALIDATE is defined,
	 * the k_spinlock struct has no members. The result is that
	 * in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas on the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this, we add a one-byte dummy member to
	 * k_spinlock when the user selects C++ support and k_spinlock
	 * would otherwise be empty.
	 */
	char dummy;
#endif
};
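
/* A minimal usage sketch (illustrative only; the names below are
 * hypothetical, not part of the API). A zero-initialized spinlock
 * (e.g. a static object) starts out unlocked, and spinlocks are
 * commonly embedded in larger kernel structures such as k_msgq,
 * which is why the empty-struct sizing issue noted above matters:
 *
 *	static struct k_spinlock pool_lock;
 *
 *	struct my_pool {
 *		struct k_spinlock lock;
 *		int free_count;
 *	};
 */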

/* There's a spinlock validation framework available when asserts are
 * enabled. It adds a relatively hefty overhead (roughly 3 kB) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */
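
/* A sketch of the encoding described by the thread_cpu comment above
 * (illustrative, not the kernel's actual validation code): the owning
 * thread pointer is packed with the locking CPU's ID in the two low
 * bits, which is why the BUILD_ASSERT caps CONFIG_MP_NUM_CPUS at 4:
 *
 *	uintptr_t owner = (uintptr_t)cur_thread | (cpu_id & 0x3);
 *	int locking_cpu = owner & 0x3;
 *	struct k_thread *holder = (struct k_thread *)(owner & ~(uintptr_t)0x3);
 */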

/**
 * @brief Spinlock key type
 *
 * This type defines a "key" value used by a spinlock implementation
 * to store the system interrupt state at the time of a call to
 * k_spin_lock(). It is expected to be passed to a matching
 * k_spin_unlock().
 *
 * This type is opaque and should not be inspected by application
 * code.
 */
typedef struct z_spinlock_key k_spinlock_key_t;

/**
 * @brief Lock a spinlock
 *
 * This routine locks the specified spinlock, returning a key handle
 * representing interrupt state needed at unlock time. Upon
 * returning, the calling thread is guaranteed not to be suspended or
 * interrupted on its current CPU until it calls k_spin_unlock(). The
 * implementation guarantees mutual exclusion: exactly one thread on
 * one CPU will return from k_spin_lock() at a time. Other CPUs
 * trying to acquire a lock already held by another CPU will enter an
 * implementation-defined busy loop ("spinning") until the lock is
 * released.
 *
 * Separate spin locks may be nested. It is legal to lock an
 * (unlocked) spin lock while holding a different lock. Spin locks
 * are not recursive, however: an attempt to acquire a spin lock that
 * the CPU already holds will deadlock.
 *
 * In circumstances where only one CPU exists, the behavior of
 * k_spin_lock() remains as specified above, though obviously no
 * spinning will take place. Implementations are free to optimize
 * in uniprocessor contexts such that the locking reduces to an
 * interrupt mask operation.
 *
 * @param l A pointer to the spinlock to lock
 * @return A key value that must be passed to k_spin_unlock() when the
 *         lock is released.
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation. The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock %p", l);
# ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
# endif
#endif

#ifdef CONFIG_SMP
	while (!atomic_cas(&l->locked, 0, 1)) {
	}
#endif

#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#endif
	return k;
}

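/* A minimal usage sketch (illustrative only; "data_lock" and the
 * surrounding function are hypothetical). The key returned by
 * k_spin_lock() carries the saved interrupt state and must be handed
 * back to k_spin_unlock():
 *
 *	static struct k_spinlock data_lock;
 *	static int shared_count;
 *
 *	void bump_count(void)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&data_lock);
 *
 *		shared_count++;
 *		k_spin_unlock(&data_lock, key);
 *	}
 */
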
/**
 * @brief Unlock a spin lock
 *
 * This releases a lock acquired by k_spin_lock(). After this
 * function is called, any CPU will be able to acquire the lock. If
 * other CPUs are currently spinning inside k_spin_lock() waiting for
 * this lock, exactly one of them will return synchronously with the
 * lock held.
 *
 * Spin locks must be properly nested. A call to k_spin_unlock() must
 * be made on the lock object most recently locked using
 * k_spin_lock(), using the key value that it returned. Attempts to
 * unlock mis-nested locks, to unlock locks that are not held, or
 * to pass a key parameter other than the one returned from
 * k_spin_lock(), are illegal. When CONFIG_SPIN_VALIDATE is set, some
 * of these errors can be detected by the framework.
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 *        acquired
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value). We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race. But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	arch_irq_unlock(key.key);
}

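/* A sketch of the nesting rule above (illustrative only; the lock
 * names are hypothetical). Distinct locks may be nested, but they
 * must be released in reverse (LIFO) order, each with its own key:
 *
 *	k_spinlock_key_t key_a = k_spin_lock(&lock_a);
 *	k_spinlock_key_t key_b = k_spin_lock(&lock_b);
 *
 *	... critical section protected by both locks ...
 *
 *	k_spin_unlock(&lock_b, key_b);
 *	k_spin_unlock(&lock_a, key_a);
 */
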
/* Internal function: releases the lock, but leaves local interrupts
 * disabled
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}
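
/* Illustrative only: k_spin_release() enables a split-unlock pattern
 * inside the kernel, where the lock word is dropped first and the
 * saved interrupt state is restored separately later. A hypothetical
 * sketch of that shape (the actual kernel call sites differ):
 *
 *	k_spinlock_key_t key = k_spin_lock(&some_lock);
 *
 *	... do work, hand off ownership ...
 *
 *	k_spin_release(&some_lock);
 *	... local interrupts remain masked here ...
 *	arch_irq_unlock(key.key);
 */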

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */