/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <atomic.h>

/* These stubs aren't provided by the mocking framework, and I can't
 * find a proper place to put them as mocking seems not to have a
 * proper "arch" layer.
 */
#ifdef ZTEST_UNITTEST
static inline int z_arch_irq_lock(void)
{
	return 0;
}

static inline void z_arch_irq_unlock(int key)
{
	ARG_UNUSED(key);
}
#endif

/* There's a spinlock validation framework available when asserts are
 * enabled.  It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 * (Note that we're using the kconfig value here.  This isn't defined
 * for every board, but the default of zero works well as an
 * "infinity" fallback.  There is a DT_FLASH_SIZE parameter too, but
 * that seems even more poorly supported.)
 */
#if (CONFIG_FLASH_SIZE == 0) || (CONFIG_FLASH_SIZE > 32)
#if defined(CONFIG_ASSERT) && (CONFIG_MP_NUM_CPUS < 4)
#include <misc/__assert.h>
struct k_spinlock;
int z_spin_lock_valid(struct k_spinlock *l);
int z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
#define SPIN_VALIDATE
#endif
#endif

struct k_spinlock_key {
	int key;
};

typedef struct k_spinlock_key k_spinlock_key_t;

struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits (see the sketch after this struct).
	 */
	size_t thread_cpu;
#endif
};
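
/* Illustrative sketch of the thread_cpu packing noted in the struct
 * above.  The real encoding is done by z_spin_lock_set_owner() in the
 * kernel sources, so treat this as an assumption about the layout
 * rather than a reference; the helper names are hypothetical and it
 * assumes k_thread pointers are at least 4-byte aligned:
 *
 *	size_t pack_owner(struct k_thread *t, int cpu)
 *	{
 *		return (size_t)t | (cpu & 0x3);    // thread pointer + CPU id
 *	}
 *
 *	struct k_thread *owner_thread(size_t tc)
 *	{
 *		return (struct k_thread *)(tc & ~(size_t)0x3);
 *	}
 *
 *	int owner_cpu(size_t tc)
 *	{
 *		return (int)(tc & 0x3);            // bottom two bits
 *	}
 */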

static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation.  The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = z_arch_irq_lock();

#ifdef SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
#endif

#ifdef CONFIG_SMP
	while (!atomic_cas(&l->locked, 0, 1)) {
	}
#endif

#ifdef SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#endif
	return k;
}

static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
#endif

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value).  We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race.  But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	z_arch_irq_unlock(key.key);
}

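/* Minimal usage sketch for the lock/unlock API above; the lock, data,
 * and function names are examples for illustration, not part of this
 * header:
 *
 *	static struct k_spinlock my_lock;
 *	static int shared_counter;
 *
 *	void bump_counter(void)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *		shared_counter++;              // safe against other CPUs and IRQs
 *		k_spin_unlock(&my_lock, key);  // restores the saved interrupt state
 *	}
 */
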
/* Internal function: releases the lock, but leaves local interrupts
 * disabled.
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}
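
/* Hypothetical sketch of how k_spin_release() might be used: drop the
 * lock so other CPUs can take it, but keep local interrupts masked
 * until the saved key is handed to whatever finishes the operation
 * (e.g. a context switch path).  All names besides the spinlock API
 * are made up for illustration:
 *
 *	k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *	prepare_switch();                   // hypothetical work under the lock
 *	k_spin_release(&my_lock);           // lock is free, IRQs still masked
 *	finish_switch_and_unlock(key.key);  // eventually calls z_arch_irq_unlock()
 */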

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */