/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Mutex kernel services
 *
 * This module contains routines for handling mutex locking and unlocking.
 *
 * Mutexes implement a priority inheritance algorithm that boosts the priority
 * level of the owning thread to match the priority level of the highest
 * priority thread waiting on the mutex.
 *
 * Each mutex that contributes to priority inheritance must be released in the
 * reverse order in which it was acquired. Furthermore, each subsequent mutex
 * that contributes to raising the owning thread's priority level must be
 * acquired at a point after the most recent "bumping" of the priority level.
 *
 * For example, if thread A has two mutexes contributing to the raising of its
 * priority level, the second mutex M2 must be acquired by thread A after
 * thread A's priority level was bumped due to owning the first mutex M1.
 * When releasing the mutexes, thread A must release M2 before it releases M1.
 * Failure to follow this nested model may result in threads running at
 * unexpected priority levels (too high, or too low).
 */
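
/*
 * Usage sketch (illustrative only, not part of this file's logic): the
 * nested locking pattern described above, as application code might apply
 * it. The mutex and function names below are hypothetical.
 *
 *	K_MUTEX_DEFINE(m1);
 *	K_MUTEX_DEFINE(m2);
 *
 *	void worker(void)
 *	{
 *		k_mutex_lock(&m1, K_FOREVER);	// may bump this thread's priority
 *		k_mutex_lock(&m2, K_FOREVER);	// acquired after any bump from m1
 *
 *		do_protected_work();		// hypothetical critical section
 *
 *		k_mutex_unlock(&m2);		// release in reverse order...
 *		k_mutex_unlock(&m1);		// ...so inheritance unwinds cleanly
 *	}
 */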

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <ksched.h>
#include <wait_q.h>
#include <errno.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/sys/check.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* We use a global spinlock here because some of the synchronization
 * is protecting things like owner thread priorities which aren't
 * "part of" a single k_mutex. Should move those bits of the API
 * under the scheduler lock so we can break this up.
 */
static struct k_spinlock lock;

#ifdef CONFIG_OBJ_CORE_MUTEX
static struct k_obj_type obj_type_mutex;
#endif

int z_impl_k_mutex_init(struct k_mutex *mutex)
{
	mutex->owner = NULL;
	mutex->lock_count = 0U;

	z_waitq_init(&mutex->wait_q);

	k_object_init(mutex);

#ifdef CONFIG_OBJ_CORE_MUTEX
	k_obj_core_init_and_link(K_OBJ_CORE(mutex), &obj_type_mutex);
#endif

	SYS_PORT_TRACING_OBJ_INIT(k_mutex, mutex, 0);

	return 0;
}
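
/*
 * Usage sketch (illustrative only): a mutex may be defined statically with
 * K_MUTEX_DEFINE() or initialized at run time with k_mutex_init(); the name
 * "my_mutex" is hypothetical.
 *
 *	K_MUTEX_DEFINE(static_mutex);		// usable without further init
 *
 *	struct k_mutex my_mutex;
 *
 *	(void)k_mutex_init(&my_mutex);		// returns 0
 */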

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_mutex_init(struct k_mutex *mutex)
{
	Z_OOPS(K_SYSCALL_OBJ_INIT(mutex, K_OBJ_MUTEX));
	return z_impl_k_mutex_init(mutex);
}
#include <syscalls/k_mutex_init_mrsh.c>
#endif

static int32_t new_prio_for_inheritance(int32_t target, int32_t limit)
{
	int new_prio = z_is_prio_higher(target, limit) ? target : limit;

	new_prio = z_get_new_prio_with_ceiling(new_prio);

	return new_prio;
}
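
/*
 * Worked example (assuming the usual Zephyr convention that a numerically
 * lower value is a higher priority): if a priority-3 waiter pends on a mutex
 * whose owner runs at priority 7, new_prio_for_inheritance(3, 7) picks 3,
 * which z_get_new_prio_with_ceiling() may then clamp before the owner is
 * boosted.
 */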

static bool adjust_owner_prio(struct k_mutex *mutex, int32_t new_prio)
{
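	/*
	 * Only touch the scheduler when the owner's priority actually
	 * changes; the return value reports whether a reschedule is needed.
	 */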
	if (mutex->owner->base.prio != new_prio) {

		LOG_DBG("%p (ready (y/n): %c) prio changed to %d (was %d)",
			mutex->owner, z_is_thread_ready(mutex->owner) ?
			'y' : 'n',
			new_prio, mutex->owner->base.prio);

		return z_set_prio(mutex->owner, new_prio);
	}
	return false;
}

int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
{
	int new_prio;
	k_spinlock_key_t key;
	bool resched = false;

	__ASSERT(!arch_is_in_isr(), "mutexes cannot be used inside ISRs");

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mutex, lock, mutex, timeout);

	key = k_spin_lock(&lock);

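	/*
	 * Fast path: the mutex is free, or the caller already owns it and is
	 * locking it recursively.
	 */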
	if (likely((mutex->lock_count == 0U) || (mutex->owner == _current))) {

		mutex->owner_orig_prio = (mutex->lock_count == 0U) ?
					_current->base.prio :
					mutex->owner_orig_prio;

		mutex->lock_count++;
		mutex->owner = _current;

		LOG_DBG("%p took mutex %p, count: %d, orig prio: %d",
			_current, mutex, mutex->lock_count,
			mutex->owner_orig_prio);

		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, 0);

		return 0;
	}

	if (unlikely(K_TIMEOUT_EQ(timeout, K_NO_WAIT))) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, -EBUSY);

		return -EBUSY;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mutex, lock, mutex, timeout);

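	/*
	 * Slow path: the mutex is held by another thread. Boost the owner's
	 * priority if the caller's is higher, then pend on the wait queue
	 * until the mutex is handed over or the timeout expires.
	 */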
	new_prio = new_prio_for_inheritance(_current->base.prio,
					    mutex->owner->base.prio);

	LOG_DBG("adjusting prio up on mutex %p", mutex);

	if (z_is_prio_higher(new_prio, mutex->owner->base.prio)) {
		resched = adjust_owner_prio(mutex, new_prio);
	}

	int got_mutex = z_pend_curr(&lock, key, &mutex->wait_q, timeout);

	LOG_DBG("on mutex %p got_mutex value: %d", mutex, got_mutex);

	LOG_DBG("%p got mutex %p (y/n): %c", _current, mutex,
		(got_mutex == 0) ? 'y' : 'n');

	if (got_mutex == 0) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, 0);
		return 0;
	}

	/* timed out */

	LOG_DBG("%p timeout on mutex %p", _current, mutex);

	key = k_spin_lock(&lock);

	/*
	 * Check if mutex was unlocked after this thread was unpended.
	 * If so, skip adjusting owner's priority down.
	 */
	if (likely(mutex->owner != NULL)) {
		struct k_thread *waiter = z_waitq_head(&mutex->wait_q);

		new_prio = (waiter != NULL) ?
			new_prio_for_inheritance(waiter->base.prio, mutex->owner_orig_prio) :
			mutex->owner_orig_prio;

		LOG_DBG("adjusting prio down on mutex %p", mutex);

		resched = adjust_owner_prio(mutex, new_prio) || resched;
	}

	if (resched) {
		z_reschedule(&lock, key);
	} else {
		k_spin_unlock(&lock, key);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, -EAGAIN);

	return -EAGAIN;
}
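
/*
 * Usage sketch (illustrative only): locking with a finite timeout and
 * checking the result; "my_mutex" is a hypothetical name. With K_NO_WAIT the
 * failure return is -EBUSY instead of -EAGAIN.
 *
 *	int ret = k_mutex_lock(&my_mutex, K_MSEC(100));
 *
 *	if (ret == 0) {
 *		// ...critical section...
 *		k_mutex_unlock(&my_mutex);
 *	} else if (ret == -EAGAIN) {
 *		// timed out waiting for the current owner
 *	}
 */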

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex,
				      k_timeout_t timeout)
{
	Z_OOPS(K_SYSCALL_OBJ(mutex, K_OBJ_MUTEX));
	return z_impl_k_mutex_lock(mutex, timeout);
}
#include <syscalls/k_mutex_lock_mrsh.c>
#endif

int z_impl_k_mutex_unlock(struct k_mutex *mutex)
{
	struct k_thread *new_owner;

	__ASSERT(!arch_is_in_isr(), "mutexes cannot be used inside ISRs");

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mutex, unlock, mutex);

	CHECKIF(mutex->owner == NULL) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, -EINVAL);

		return -EINVAL;
	}
	/*
	 * The current thread does not own the mutex.
	 */
	CHECKIF(mutex->owner != _current) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, -EPERM);

		return -EPERM;
	}

	/*
	 * Guard against unlocking a mutex that is not locked. Since the
	 * current thread equals mutex->owner here, lock_count cannot be
	 * zero and no underflow check is required; assert to catch the
	 * undefined behavior anyway.
	 */
	__ASSERT_NO_MSG(mutex->lock_count > 0U);

	LOG_DBG("mutex %p lock_count: %d", mutex, mutex->lock_count);

	/*
	 * If the lock count is greater than 1, this is a nested unlock:
	 * decrement the count, keep the current thread as owner, and return.
	 */
	if (mutex->lock_count > 1U) {
		mutex->lock_count--;
		goto k_mutex_unlock_return;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);

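	/*
	 * Full release: restore the owner's original priority, then hand the
	 * mutex directly to the highest-priority waiter, if there is one.
	 */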
	adjust_owner_prio(mutex, mutex->owner_orig_prio);

	/* Get the new owner, if any */
	new_owner = z_unpend_first_thread(&mutex->wait_q);

	mutex->owner = new_owner;

	LOG_DBG("new owner of mutex %p: %p (prio: %d)",
		mutex, new_owner, new_owner ? new_owner->base.prio : -1000);

	if (new_owner != NULL) {
		/*
		 * The wait queue is priority-ordered, so the new owner (its
		 * former head) already has higher or equal priority than any
		 * remaining waiter: no need to adjust its priority.
		 */
		mutex->owner_orig_prio = new_owner->base.prio;
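
		/* Wake the new owner with 0 as its k_mutex_lock() return value. */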
		arch_thread_return_value_set(new_owner, 0);
		z_ready_thread(new_owner);
		z_reschedule(&lock, key);
	} else {
		mutex->lock_count = 0U;
		k_spin_unlock(&lock, key);
	}

k_mutex_unlock_return:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, 0);

	return 0;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_mutex_unlock(struct k_mutex *mutex)
{
	Z_OOPS(K_SYSCALL_OBJ(mutex, K_OBJ_MUTEX));
	return z_impl_k_mutex_unlock(mutex);
}
#include <syscalls/k_mutex_unlock_mrsh.c>
#endif

#ifdef CONFIG_OBJ_CORE_MUTEX
static int init_mutex_obj_core_list(void)
{
	/* Initialize mutex object type */

	z_obj_type_init(&obj_type_mutex, K_OBJ_TYPE_MUTEX_ID,
			offsetof(struct k_mutex, obj_core));

	/* Initialize and link statically defined mutexes */

	STRUCT_SECTION_FOREACH(k_mutex, mutex) {
		k_obj_core_init_and_link(K_OBJ_CORE(mutex), &obj_type_mutex);
	}

	return 0;
}

SYS_INIT(init_mutex_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif