/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <atomic.h>

/* These stubs aren't provided by the mocking framework, and I can't
 * find a proper place to put them as mocking seems not to have a
 * proper "arch" layer.
 */
#ifdef ZTEST_UNITTEST
static inline int z_arch_irq_lock(void)
{
	return 0;
}

static inline void z_arch_irq_unlock(int key)
{
	ARG_UNUSED(key);
}
#endif

/* There's a spinlock validation framework available when asserts are
 * enabled.  It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 * (Note that we're using the kconfig value here.  This isn't defined
 * for every board, but the default of zero works well as an
 * "infinity" fallback.  There is a DT_FLASH_SIZE parameter too, but
 * that seems even more poorly supported.)
 */
#if (CONFIG_FLASH_SIZE == 0) || (CONFIG_FLASH_SIZE > 32)
#if defined(CONFIG_ASSERT) && (CONFIG_MP_NUM_CPUS < 4)
#include <misc/__assert.h>
struct k_spinlock;
int z_spin_lock_valid(struct k_spinlock *l);
int z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
#define SPIN_VALIDATE
#endif
#endif

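/* Illustrative note (not part of the original header): with the gating
 * above, SPIN_VALIDATE ends up defined only when CONFIG_ASSERT is
 * enabled, fewer than four CPUs are configured, and CONFIG_FLASH_SIZE
 * is either unset/zero (the "infinity" fallback) or greater than 32.
 * For example, adding CONFIG_ASSERT=y to a hypothetical prj.conf on a
 * board with enough flash is enough to turn the checks on.
 */
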
struct k_spinlock_key {
	int key;
};

typedef struct k_spinlock_key k_spinlock_key_t;

struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef SPIN_VALIDATE
	/* Stores the thread that holds the lock, with the locking CPU
	 * ID packed into the bottom two bits.  (See the illustrative
	 * packing sketch just after this struct.)
	 */
	size_t thread_cpu;
#endif
};
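
/* Illustrative sketch, not part of the original header: one way the
 * thread_cpu field above could be packed and unpacked, assuming the
 * owner is a pointer with at least 4-byte alignment so a CPU id of
 * 0-3 fits in the low two bits:
 *
 *	lock->thread_cpu = (size_t)owner_thread | cpu_id;
 *	int cpu = lock->thread_cpu & 3;
 *	struct k_thread *owner =
 *		(struct k_thread *)(lock->thread_cpu & ~(size_t)3);
 *
 * The actual bookkeeping is done by the z_spin_*_valid() and
 * z_spin_lock_set_owner() helpers declared above, not by this header.
 */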

static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	k_spinlock_key_t k;

	ARG_UNUSED(l);

	/* Note that we need to use the underlying arch-specific lock
	 * implementation.  The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = z_arch_irq_lock();

#ifdef SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
#endif

#ifdef CONFIG_SMP
	/* Spin until we atomically win the 0 -> 1 transition */
	while (!atomic_cas(&l->locked, 0, 1)) {
	}
#endif

#ifdef SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#endif
	return k;
}

static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
#endif

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value).  We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race.  But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	z_arch_irq_unlock(key.key);
}

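/* Usage sketch (illustrative, not part of the original header): the
 * expected pattern is to pair each k_spin_lock() with a k_spin_unlock()
 * on the same lock, keeping the returned key on the caller's stack:
 *
 *	static struct k_spinlock my_lock;
 *	static int shared_counter;
 *
 *	void bump_counter(void)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *		shared_counter++;
 *		k_spin_unlock(&my_lock, key);
 *	}
 *
 * On uniprocessor builds this degenerates to an IRQ lock/unlock pair;
 * under CONFIG_SMP it additionally spins on the per-lock atomic.
 */
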
/* Internal function: releases the lock, but leaves local interrupts
 * disabled
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}
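
/* Illustrative sketch, not from the original header: k_spin_release()
 * is meant for internal hand-off paths that must drop the lock while
 * keeping local interrupts masked, e.g. a hypothetical caller:
 *
 *	k_spinlock_key_t key = k_spin_lock(&lock);
 *
 *	...update shared state...
 *	k_spin_release(&lock);
 *	...do work that still needs interrupts off...
 *	z_arch_irq_unlock(key.key);
 */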


#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */