/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <sys/atomic.h>
#include <sys/__assert.h>
#include <stdbool.h>
#include <arch/cpu.h>

struct z_spinlock_key {
	int key;
};

/**
 * @brief Kernel Spin Lock
 *
 * This struct defines a spin lock record on which CPUs can wait with
 * k_spin_lock().  Any number of spinlocks may be defined in
 * application code.
 */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#endif

#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined,
	 * the k_spinlock struct will have no members.  The result is
	 * that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock is
	 * embedded into another struct like k_msgq, because C and C++
	 * will have different ideas about the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this we add a 1 byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
};
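
/* Illustrative sketch (not part of the API): a hypothetical container
 * struct showing why the dummy member matters.  Without it, an empty
 * k_spinlock occupies 0 bytes when this header is compiled as C but
 * 1 byte when compiled as C++, so "count" below would land at
 * different offsets in the two languages.  The struct and member
 * names are invented for this example only.
 */
#if 0
struct my_container {
	struct k_spinlock lock; /* size 0 (C) vs. 1 (C++) without the dummy */
	int count;              /* offset would differ between C and C++ */
};
#endif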

/* There's a spinlock validation framework available when asserts are
 * enabled.  It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_NUM_CPUS < 4, "Too many CPUs for mask");
#endif /* CONFIG_SPIN_VALIDATE */

/**
 * @brief Spinlock key type
 *
 * This type defines a "key" value used by a spinlock implementation
 * to store the system interrupt state at the time of a call to
 * k_spin_lock().  It is expected to be passed to a matching
 * k_spin_unlock().
 *
 * This type is opaque and should not be inspected by application
 * code.
 */
typedef struct z_spinlock_key k_spinlock_key_t;

/**
 * @brief Lock a spinlock
 *
 * This routine locks the specified spinlock, returning a key handle
 * representing interrupt state needed at unlock time.  Upon
 * returning, the calling thread is guaranteed not to be suspended or
 * interrupted on its current CPU until it calls k_spin_unlock().  The
 * implementation guarantees mutual exclusion: exactly one thread on
 * one CPU will return from k_spin_lock() at a time.  Other CPUs
 * trying to acquire a lock already held by another CPU will enter an
 * implementation-defined busy loop ("spinning") until the lock is
 * released.
 *
 * Separate spin locks may be nested.  It is legal to lock an
 * (unlocked) spin lock while holding a different lock.  Spin locks
 * are not recursive, however: an attempt to acquire a spin lock that
 * the CPU already holds will deadlock.
 *
 * In circumstances where only one CPU exists, the behavior of
 * k_spin_lock() remains as specified above, though obviously no
 * spinning will take place.  Implementations are free to optimize
 * uniprocessor contexts such that the locking reduces to an
 * interrupt mask operation.
 *
 * @param l A pointer to the spinlock to lock
 * @return A key value that must be passed to k_spin_unlock() when the
 *         lock is released.
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation.  The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock %p", l);
# ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(l));
# endif
#endif

#ifdef CONFIG_SMP
	while (!atomic_cas(&l->locked, 0, 1)) {
	}
#endif

#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#endif
	return k;
}
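
/* Usage sketch (illustrative only): the canonical pattern is to take
 * the lock, touch the shared data, and release the lock with the same
 * key.  The lock object, counter, and function name below are
 * hypothetical names invented for this example.
 */
#if 0
static struct k_spinlock example_lock;
static int example_counter;

void example_increment(void)
{
	k_spinlock_key_t key = k_spin_lock(&example_lock);

	example_counter++;                 /* protected critical section */
	k_spin_unlock(&example_lock, key);
}
#endif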

/**
 * @brief Unlock a spin lock
 *
 * This releases a lock acquired by k_spin_lock().  After this
 * function is called, any CPU will be able to acquire the lock.  If
 * other CPUs are currently spinning inside k_spin_lock() waiting for
 * this lock, exactly one of them will return synchronously with the
 * lock held.
 *
 * Spin locks must be properly nested.  A call to k_spin_unlock() must
 * be made on the lock object most recently locked using
 * k_spin_lock(), using the key value that it returned.  Attempts to
 * unlock mis-nested locks, to unlock locks that are not held, or to
 * pass a key parameter other than the one returned from
 * k_spin_lock(), are illegal.  When CONFIG_SPIN_VALIDATE is set, some
 * of these errors can be detected by the framework.
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 *            acquired
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value).  We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race.  But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	arch_irq_unlock(key.key);
}
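
/* Nesting sketch (illustrative only): distinct locks may be held at
 * the same time, but per the rules above each must be released in
 * reverse (LIFO) order with its own key.  "lock_a" and "lock_b" are
 * hypothetical names invented for this example.
 */
#if 0
static struct k_spinlock lock_a, lock_b;

void example_nested(void)
{
	k_spinlock_key_t key_a = k_spin_lock(&lock_a);
	k_spinlock_key_t key_b = k_spin_lock(&lock_b);

	/* ... work under both locks ... */

	k_spin_unlock(&lock_b, key_b); /* innermost lock released first */
	k_spin_unlock(&lock_a, key_a);
}
#endif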

/* Internal function: releases the lock, but leaves local interrupts
 * disabled
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}
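
/* Internal-use sketch (illustrative; the intended-use description is
 * an assumption, not documented API): the kernel can drop the lock
 * itself while keeping local interrupts masked, for example just
 * before handing off to code that restores the interrupt state from
 * the saved key on its own.  The function name below is made up for
 * this example.
 */
#if 0
void example_handoff(struct k_spinlock *l, k_spinlock_key_t key)
{
	k_spin_release(l);         /* other CPUs may now take the lock */
	/* ... local interrupts remain masked here ... */
	arch_irq_unlock(key.key);  /* interrupt state restored later */
}
#endif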


#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */