/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <sys/atomic.h>
#include <sys/__assert.h>
#include <stdbool.h>
#include <arch/cpu.h>

#ifdef __cplusplus
extern "C" {
#endif

struct z_spinlock_key {
	int key;
};

/**
 * @brief Kernel Spin Lock
 *
 * This struct defines a spin lock record on which CPUs can wait with
 * k_spin_lock(). Any number of spinlocks may be defined in
 * application code.
 */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#endif

#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined,
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas on the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this we add a 1 byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
};
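
/* Usage sketch (an illustrative addition, not part of the upstream
 * documentation): a spinlock is declared as a plain object, and the
 * zero-initialized state of a static instance is the unlocked state,
 * so no separate init call is needed. The name my_lock is
 * hypothetical.
 *
 *	static struct k_spinlock my_lock;
 */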

/* There's a spinlock validation framework available when asserts are
 * enabled. It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */
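
/* Configuration note (an illustrative assumption, not taken from the
 * original header): the validation layer is controlled by Kconfig and
 * would typically be enabled from an application's prj.conf, e.g.:
 *
 *	CONFIG_ASSERT=y
 *	CONFIG_SPIN_VALIDATE=y
 *
 * Check the kernel Kconfig sources for the exact dependencies.
 */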

/**
 * @brief Spinlock key type
 *
 * This type defines a "key" value used by a spinlock implementation
 * to store the system interrupt state at the time of a call to
 * k_spin_lock(). It is expected to be passed to a matching
 * k_spin_unlock().
 *
 * This type is opaque and should not be inspected by application
 * code.
 */
typedef struct z_spinlock_key k_spinlock_key_t;

/**
 * @brief Lock a spinlock
 *
 * This routine locks the specified spinlock, returning a key handle
 * representing interrupt state needed at unlock time. Upon
 * returning, the calling thread is guaranteed not to be suspended or
 * interrupted on its current CPU until it calls k_spin_unlock(). The
 * implementation guarantees mutual exclusion: exactly one thread on
 * one CPU will return from k_spin_lock() at a time. Other CPUs
 * trying to acquire a lock already held by another CPU will enter an
 * implementation-defined busy loop ("spinning") until the lock is
 * released.
 *
 * Separate spin locks may be nested. It is legal to lock an
 * (unlocked) spin lock while holding a different lock. Spin locks
 * are not recursive, however: an attempt to acquire a spin lock that
 * the CPU already holds will deadlock.
 *
 * In circumstances where only one CPU exists, the behavior of
 * k_spin_lock() remains as specified above, though obviously no
 * spinning will take place. Implementations are free to optimize
 * in uniprocessor contexts such that the locking reduces to an
 * interrupt mask operation.
 *
 * @param l A pointer to the spinlock to lock
 * @return A key value that must be passed to k_spin_unlock() when the
 *         lock is released.
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation. The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock %p", l);
# ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
# endif
#endif

#ifdef CONFIG_SMP
	while (!atomic_cas(&l->locked, 0, 1)) {
	}
#endif

#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#endif
	return k;
}
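
/* Example (an illustrative sketch, not part of the upstream
 * documentation): a spinlock protecting a trivial shared counter.
 * The names my_lock, my_count and my_increment are hypothetical.
 *
 *	static struct k_spinlock my_lock;
 *	static unsigned int my_count;
 *
 *	void my_increment(void)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *		my_count++;
 *		k_spin_unlock(&my_lock, key);
 *	}
 */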

/**
 * @brief Unlock a spin lock
 *
 * This releases a lock acquired by k_spin_lock(). After this
 * function is called, any CPU will be able to acquire the lock. If
 * other CPUs are currently spinning inside k_spin_lock() waiting for
 * this lock, exactly one of them will return synchronously with the
 * lock held.
 *
 * Spin locks must be properly nested. A call to k_spin_unlock() must
 * be made on the lock object most recently locked using
 * k_spin_lock(), using the key value that it returned. Attempts to
 * unlock mis-nested locks, to unlock locks that are not held, or to
 * pass a key parameter other than the one returned from
 * k_spin_lock(), are illegal. When CONFIG_SPIN_VALIDATE is set, some
 * of these errors can be detected by the framework.
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 *        acquired
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value). We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race. But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	arch_irq_unlock(key.key);
}
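
/* Example (an illustrative sketch, not part of the upstream
 * documentation): properly nested use of two different locks. Each
 * lock is released with its own key, in the reverse order of
 * acquisition. lock_a and lock_b are hypothetical.
 *
 *	k_spinlock_key_t key_a = k_spin_lock(&lock_a);
 *	k_spinlock_key_t key_b = k_spin_lock(&lock_b);
 *
 *	... critical section guarded by both locks ...
 *
 *	k_spin_unlock(&lock_b, key_b);
 *	k_spin_unlock(&lock_a, key_a);
 */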

/* Internal function: releases the lock, but leaves local interrupts
 * disabled
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */