/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <sys/atomic.h>
#include <sys/__assert.h>
#include <stdbool.h>
#include <arch/cpu.h>

struct z_spinlock_key {
	int key;
};

/**
 * @brief Kernel Spin Lock
 *
 * This struct defines a spin lock record on which CPUs can wait with
 * k_spin_lock(). Any number of spinlocks may be defined in
 * application code.
 */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#endif

#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas on the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this we add a 1 byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
};
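
/* Illustrative sketch (not part of the API): spinlocks are typically
 * defined as static objects or embedded in the structure whose data
 * they protect. The names below are hypothetical.
 *
 *	static struct k_spinlock my_lock;
 *
 *	struct my_fifo {
 *		struct k_spinlock lock;
 *		int count;
 *	};
 */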

/* There's a spinlock validation framework available when asserts are
 * enabled. It adds a relatively hefty overhead (about 3k) to kernel
 * code size, so don't enable it on platforms where code size is tight.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_NUM_CPUS < 4, "Too many CPUs for mask");
#endif /* CONFIG_SPIN_VALIDATE */

/**
 * @brief Spinlock key type
 *
 * This type defines a "key" value used by a spinlock implementation
 * to store the system interrupt state at the time of a call to
 * k_spin_lock(). It is expected to be passed to a matching
 * k_spin_unlock().
 *
 * This type is opaque and should not be inspected by application
 * code.
 */
typedef struct z_spinlock_key k_spinlock_key_t;

/**
 * @brief Lock a spinlock
 *
 * This routine locks the specified spinlock, returning a key handle
 * representing interrupt state needed at unlock time. Upon
 * returning, the calling thread is guaranteed not to be suspended or
 * interrupted on its current CPU until it calls k_spin_unlock(). The
 * implementation guarantees mutual exclusion: exactly one thread on
 * one CPU will return from k_spin_lock() at a time. Other CPUs
 * trying to acquire a lock already held by another CPU will enter an
 * implementation-defined busy loop ("spinning") until the lock is
 * released.
 *
 * Separate spin locks may be nested. It is legal to lock an
 * (unlocked) spin lock while holding a different lock. Spin locks
 * are not recursive, however: an attempt to acquire a spin lock that
 * the CPU already holds will deadlock.
 *
 * In circumstances where only one CPU exists, the behavior of
 * k_spin_lock() remains as specified above, though obviously no
 * spinning will take place. Implementations are free to optimize
 * in uniprocessor contexts such that the locking reduces to an
 * interrupt mask operation.
 *
 * @param l A pointer to the spinlock to lock
 * @return A key value that must be passed to k_spin_unlock() when the
 * lock is released.
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation. The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock %p", l);
# ifdef KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(l));
# endif
#endif

#ifdef CONFIG_SMP
	while (!atomic_cas(&l->locked, 0, 1)) {
	}
#endif

#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#endif
	return k;
}
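
/* Nesting sketch (illustrative only): separate locks may be nested, but
 * they must be released in the reverse order of acquisition, each with
 * its own key. The lock names below are hypothetical.
 *
 *	k_spinlock_key_t ka = k_spin_lock(&lock_a);
 *	k_spinlock_key_t kb = k_spin_lock(&lock_b);
 *
 *	...critical section touching data guarded by both locks...
 *
 *	k_spin_unlock(&lock_b, kb);
 *	k_spin_unlock(&lock_a, ka);
 */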

/**
 * @brief Unlock a spin lock
 *
 * This releases a lock acquired by k_spin_lock(). After this
 * function is called, any CPU will be able to acquire the lock. If
 * other CPUs are currently spinning inside k_spin_lock() waiting for
 * this lock, exactly one of them will return synchronously with the
 * lock held.
 *
 * Spin locks must be properly nested. A call to k_spin_unlock() must
 * be made on the lock object most recently locked using
 * k_spin_lock(), using the key value that it returned. Attempts to
 * unlock mis-nested locks, to unlock locks that are not held, or to
 * pass a key parameter other than the one returned from
 * k_spin_lock(), are illegal. When CONFIG_SPIN_VALIDATE is set, some
 * of these errors can be detected by the framework.
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 * acquired
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value). We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race. But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	arch_irq_unlock(key.key);
}
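
/* Usage sketch (illustrative only, not part of the Zephyr API): the
 * canonical critical-section pattern built from k_spin_lock() and
 * k_spin_unlock(). The helper name and its parameters are assumptions
 * made for this example.
 */
static ALWAYS_INLINE void example_locked_increment(struct k_spinlock *l,
						   int *counter)
{
	k_spinlock_key_t key = k_spin_lock(l);

	/* Interrupts are masked here and no other CPU holds the lock,
	 * so the update below is safe on both UP and SMP systems.
	 */
	(*counter)++;

	k_spin_unlock(l, key);
}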

/* Internal function: releases the lock, but leaves local interrupts
 * disabled
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}
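
/* Sketch of the intended k_spin_release() pattern (an assumption based
 * on the comment above, not a documented API contract): the lock is
 * dropped early while interrupts stay masked until the saved key is
 * eventually restored.
 *
 *	k_spinlock_key_t key = k_spin_lock(&lock);
 *	...update shared state...
 *	k_spin_release(&lock);
 *	...work that must still run with interrupts masked...
 *	arch_irq_unlock(key.key);
 */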


#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */