/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel semaphore object.
 *
 * The semaphores are of the 'counting' type: each 'give' operation
 * increments the internal count by 1 if no thread is pending on the
 * semaphore. The 'init' call sets the count to 'initial_count'. Following
 * multiple 'give' operations, the same number of 'take' operations can be
 * performed without the calling thread having to pend on the semaphore,
 * or the calling task having to poll.
 */
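
/*
 * Illustrative usage, a minimal sketch (my_sem, my_isr and worker are
 * hypothetical names, not part of this file): an ISR signals units of
 * work to a consumer thread through a counting semaphore.
 *
 *     K_SEM_DEFINE(my_sem, 0, 10);
 *
 *     void my_isr(const void *arg)
 *     {
 *         k_sem_give(&my_sem);  // never blocks; count saturates at the limit
 *     }
 *
 *     void worker(void *p1, void *p2, void *p3)
 *     {
 *         while (k_sem_take(&my_sem, K_FOREVER) == 0) {
 *             // process one unit of work per successful take
 *         }
 *     }
 */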

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>

#include <zephyr/toolchain.h>
#include <wait_q.h>
#include <zephyr/sys/dlist.h>
#include <ksched.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/sys/check.h>

/* We use a system-wide lock to synchronize semaphores, which has an
 * unfortunate performance impact vs. using a per-object lock
 * (semaphores are *very* widely used). But per-object locks require
 * significant extra RAM. A properly spin-aware semaphore
 * implementation would spin on atomic access to the count variable,
 * and not on a spinlock per se. Useful optimization for the future...
 */
static struct k_spinlock lock;
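
/*
 * A hypothetical illustration of that note (not part of this file, and
 * assuming the count were an atomic_t named count): the fast path would
 * CAS-decrement the count and fall back to pending only on failure.
 *
 *     atomic_val_t old = atomic_get(&count);
 *
 *     while (old > 0 && !atomic_cas(&count, old, old - 1)) {
 *         old = atomic_get(&count);
 *     }
 *     // old > 0 here: took the semaphore lock-free; old == 0: must pend
 */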

#ifdef CONFIG_OBJ_CORE_SEM
static struct k_obj_type obj_type_sem;
#endif

int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
		      unsigned int limit)
{
	/*
	 * Limit cannot be zero or exceed K_SEM_MAX_LIMIT, and the initial
	 * count cannot be greater than the limit.
	 */
	CHECKIF(limit == 0U || limit > K_SEM_MAX_LIMIT || initial_count > limit) {
		SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, -EINVAL);

		return -EINVAL;
	}

	sem->count = initial_count;
	sem->limit = limit;

	SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, 0);

	z_waitq_init(&sem->wait_q);
#if defined(CONFIG_POLL)
	sys_dlist_init(&sem->poll_events);
#endif
	k_object_init(sem);

#ifdef CONFIG_OBJ_CORE_SEM
	k_obj_core_init_and_link(K_OBJ_CORE(sem), &obj_type_sem);
#endif

	return 0;
}

#ifdef CONFIG_USERSPACE
int z_vrfy_k_sem_init(struct k_sem *sem, unsigned int initial_count,
		      unsigned int limit)
{
	Z_OOPS(Z_SYSCALL_OBJ_INIT(sem, K_OBJ_SEM));
	return z_impl_k_sem_init(sem, initial_count, limit);
}
#include <syscalls/k_sem_init_mrsh.c>
#endif

/* Returns true if poll events were handled and the caller may need to
 * reschedule; false when CONFIG_POLL is disabled.
 */
static inline bool handle_poll_events(struct k_sem *sem)
{
#ifdef CONFIG_POLL
	z_handle_obj_poll_events(&sem->poll_events, K_POLL_STATE_SEM_AVAILABLE);
	return true;
#else
	ARG_UNUSED(sem);
	return false;
#endif
}

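/*
 * For context, a minimal sketch of how a semaphore can be observed through
 * k_poll (my_sem and events are hypothetical names; assumes CONFIG_POLL=y):
 *
 *     struct k_poll_event events[] = {
 *         K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *                                         K_POLL_MODE_NOTIFY_ONLY,
 *                                         &my_sem, 0),
 *     };
 *
 *     k_poll(events, 1, K_FOREVER);     // wakes when the semaphore is given
 *     k_sem_take(&my_sem, K_NO_WAIT);   // still must take it explicitly
 */
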
void z_impl_k_sem_give(struct k_sem *sem)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_thread *thread;
	bool resched = true;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, give, sem);

	thread = z_unpend_first_thread(&sem->wait_q);

	if (thread != NULL) {
		/* Hand the semaphore directly to the first waiter; the
		 * count is left untouched.
		 */
		arch_thread_return_value_set(thread, 0);
		z_ready_thread(thread);
	} else {
		/* No waiters: increment the count, saturating at the limit */
		sem->count += (sem->count != sem->limit) ? 1U : 0U;
		resched = handle_poll_events(sem);
	}

	if (resched) {
		z_reschedule(&lock, key);
	} else {
		k_spin_unlock(&lock, key);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, give, sem);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_sem_give(struct k_sem *sem)
{
	Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
	z_impl_k_sem_give(sem);
}
#include <syscalls/k_sem_give_mrsh.c>
#endif

int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
	int ret = 0;

	/* A blocking take may not be called from an ISR */
	__ASSERT(((arch_is_in_isr() == false) ||
		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

	k_spinlock_key_t key = k_spin_lock(&lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, take, sem, timeout);

	/* Fast path: a unit is available, take it without pending */
	if (likely(sem->count > 0U)) {
		sem->count--;
		k_spin_unlock(&lock, key);
		ret = 0;
		goto out;
	}

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);
		ret = -EBUSY;
		goto out;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_sem, take, sem, timeout);

	/* Pend on the wait queue until a 'give', a reset, or the timeout */
	ret = z_pend_curr(&lock, key, &sem->wait_q, timeout);

out:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, take, sem, timeout, ret);

	return ret;
}

void z_impl_k_sem_reset(struct k_sem *sem)
{
	struct k_thread *thread;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Wake every pending thread with -EAGAIN */
	while (true) {
		thread = z_unpend_first_thread(&sem->wait_q);
		if (thread == NULL) {
			break;
		}
		arch_thread_return_value_set(thread, -EAGAIN);
		z_ready_thread(thread);
	}
	sem->count = 0;

	SYS_PORT_TRACING_OBJ_FUNC(k_sem, reset, sem);

	handle_poll_events(sem);

	z_reschedule(&lock, key);
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
	return z_impl_k_sem_take((struct k_sem *)sem, timeout);
}
#include <syscalls/k_sem_take_mrsh.c>

static inline void z_vrfy_k_sem_reset(struct k_sem *sem)
{
	Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
	z_impl_k_sem_reset(sem);
}
#include <syscalls/k_sem_reset_mrsh.c>

static inline unsigned int z_vrfy_k_sem_count_get(struct k_sem *sem)
{
	Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
	return z_impl_k_sem_count_get(sem);
}
#include <syscalls/k_sem_count_get_mrsh.c>

#endif

#ifdef CONFIG_OBJ_CORE_SEM
static int init_sem_obj_core_list(void)
{
	/* Initialize semaphore object type */

	z_obj_type_init(&obj_type_sem, K_OBJ_TYPE_SEM_ID,
			offsetof(struct k_sem, obj_core));

	/* Initialize and link statically defined semaphores */

	STRUCT_SECTION_FOREACH(k_sem, sem) {
		k_obj_core_init_and_link(K_OBJ_CORE(sem), &obj_type_sem);
	}

	return 0;
}

SYS_INIT(init_sem_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif