blob: ee2d3079f7c8b05f20ac941927a2c7d62d9035a5 [file] [log] [blame]
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001/*
2 * Copyright (c) 2010-2016 Wind River Systems, Inc.
3 *
David B. Kinderac74d8b2017-01-18 17:01:01 -08004 * SPDX-License-Identifier: Apache-2.0
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005 */
6
7/**
8 * @file
9 *
Anas Nashifcb888e62016-12-18 09:42:55 -050010 * @brief Kernel semaphore object.
Benjamin Walsh456c6da2016-09-02 18:55:39 -040011 *
12 * The semaphores are of the 'counting' type, i.e. each 'give' operation will
Ramakrishna Pallalac44046a2017-10-25 09:19:25 -040013 * increment the internal count by 1, if no thread is pending on it. The 'init'
14 * call initializes the count to 'initial_count'. Following multiple 'give'
15 * operations, the same number of 'take' operations can be performed without
16 * the calling thread having to pend on the semaphore, or the calling task
17 * having to poll.
Benjamin Walsh456c6da2016-09-02 18:55:39 -040018 */
19
20#include <kernel.h>
Benjamin Walshf6ca7de2016-11-08 10:36:50 -050021#include <kernel_structs.h>
Anas Nashif4d994af2021-04-18 23:24:40 -040022
Benjamin Walsh456c6da2016-09-02 18:55:39 -040023#include <toolchain.h>
Benjamin Walsh456c6da2016-09-02 18:55:39 -040024#include <wait_q.h>
Anas Nashifee9dd1a2019-06-26 10:33:41 -040025#include <sys/dlist.h>
Benjamin Walshb4b108d2016-10-13 10:31:48 -040026#include <ksched.h>
Allan Stephense7d2cc22016-10-19 16:10:46 -050027#include <init.h>
Andrew Boiefc273c02017-09-23 12:51:23 -070028#include <syscall_handler.h>
Anas Nashif73008b42020-02-06 09:14:51 -050029#include <tracing/tracing.h>
Anas Nashif928af3c2019-05-04 10:36:14 -040030#include <sys/check.h>
Benjamin Walsh456c6da2016-09-02 18:55:39 -040031
Andy Rossda37a532018-07-24 14:12:36 -070032/* We use a system-wide lock to synchronize semaphores, which has
33 * unfortunate performance impact vs. using a per-object lock
34 * (semaphores are *very* widely used). But per-object locks require
35 * significant extra RAM. A properly spin-aware semaphore
36 * implementation would spin on atomic access to the count variable,
37 * and not a spinlock per se. Useful optimization for the future...
38 */
39static struct k_spinlock lock;
40
Anas Nashif928af3c2019-05-04 10:36:14 -040041int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
Andrew Boiefc273c02017-09-23 12:51:23 -070042 unsigned int limit)
Benjamin Walsh456c6da2016-09-02 18:55:39 -040043{
Anas Nashif928af3c2019-05-04 10:36:14 -040044 /*
45 * Limit cannot be zero and count cannot be greater than limit
46 */
James Harrisb1042812021-03-03 12:02:05 -080047 CHECKIF(limit == 0U || limit > K_SEM_MAX_LIMIT || initial_count > limit) {
Torbjörn Leksellfcf2fb62021-03-26 09:51:03 +010048 SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, -EINVAL);
49
Anas Nashif928af3c2019-05-04 10:36:14 -040050 return -EINVAL;
51 }
52
Benjamin Walsh456c6da2016-09-02 18:55:39 -040053 sem->count = initial_count;
54 sem->limit = limit;
Torbjörn Leksellfcf2fb62021-03-26 09:51:03 +010055
56 SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, 0);
57
Patrik Flykt4344e272019-03-08 14:19:05 -070058 z_waitq_init(&sem->wait_q);
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +030059#if defined(CONFIG_POLL)
60 sys_dlist_init(&sem->poll_events);
61#endif
Patrik Flykt4344e272019-03-08 14:19:05 -070062 z_object_init(sem);
Anas Nashif928af3c2019-05-04 10:36:14 -040063
64 return 0;
Benjamin Walsh456c6da2016-09-02 18:55:39 -040065}
66
Andrew Boiefc273c02017-09-23 12:51:23 -070067#ifdef CONFIG_USERSPACE
Anas Nashif928af3c2019-05-04 10:36:14 -040068int z_vrfy_k_sem_init(struct k_sem *sem, unsigned int initial_count,
Andy Ross65649742019-08-06 13:34:31 -070069 unsigned int limit)
Andrew Boiefc273c02017-09-23 12:51:23 -070070{
Andrew Boie8345e5e2018-05-04 15:57:57 -070071 Z_OOPS(Z_SYSCALL_OBJ_INIT(sem, K_OBJ_SEM));
Anas Nashif928af3c2019-05-04 10:36:14 -040072 return z_impl_k_sem_init(sem, initial_count, limit);
Andrew Boiefc273c02017-09-23 12:51:23 -070073}
Andy Ross65649742019-08-06 13:34:31 -070074#include <syscalls/k_sem_init_mrsh.c>
Andrew Boiefc273c02017-09-23 12:51:23 -070075#endif
Peter Mitsis45403672016-09-09 14:24:06 -040076
Andy Ross8606fab2018-03-26 10:54:40 -070077static inline void handle_poll_events(struct k_sem *sem)
Benjamin Walshacc68c12017-01-29 18:57:45 -050078{
79#ifdef CONFIG_POLL
Patrik Flykt4344e272019-03-08 14:19:05 -070080 z_handle_obj_poll_events(&sem->poll_events, K_POLL_STATE_SEM_AVAILABLE);
Adithya Baglody4b066212018-10-16 11:59:12 +053081#else
82 ARG_UNUSED(sem);
Benjamin Walshacc68c12017-01-29 18:57:45 -050083#endif
84}
85
Anas Nashif5076a832019-11-26 08:56:25 -050086void z_impl_k_sem_give(struct k_sem *sem)
Peter Mitsis45403672016-09-09 14:24:06 -040087{
Anas Nashif5076a832019-11-26 08:56:25 -050088 k_spinlock_key_t key = k_spin_lock(&lock);
Anas Nashif390537b2020-08-02 23:34:14 -040089 struct k_thread *thread;
Benjamin Walsh2e0bf3a2017-02-12 16:54:59 -050090
Torbjörn Leksellfcf2fb62021-03-26 09:51:03 +010091 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, give, sem);
92
Anas Nashif390537b2020-08-02 23:34:14 -040093 thread = z_unpend_first_thread(&sem->wait_q);
Anas Nashif5076a832019-11-26 08:56:25 -050094
Flavio Ceolinea716bf2018-09-20 16:30:45 -070095 if (thread != NULL) {
Andrew Boie4f77c2a2019-11-07 12:43:29 -080096 arch_thread_return_value_set(thread, 0);
Andy Rossb8ff63e2020-01-23 12:55:04 -080097 z_ready_thread(thread);
Andy Ross8606fab2018-03-26 10:54:40 -070098 } else {
Anas Nashif5076a832019-11-26 08:56:25 -050099 sem->count += (sem->count != sem->limit) ? 1U : 0U;
Andy Ross8606fab2018-03-26 10:54:40 -0700100 handle_poll_events(sem);
Benjamin Walsh2e0bf3a2017-02-12 16:54:59 -0500101 }
Peter Mitsis45403672016-09-09 14:24:06 -0400102
Patrik Flykt4344e272019-03-08 14:19:05 -0700103 z_reschedule(&lock, key);
Torbjörn Leksellfcf2fb62021-03-26 09:51:03 +0100104
105 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, give, sem);
Peter Mitsis45403672016-09-09 14:24:06 -0400106}
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400107
Andrew Boiefc273c02017-09-23 12:51:23 -0700108#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -0700109static inline void z_vrfy_k_sem_give(struct k_sem *sem)
110{
111 Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
112 z_impl_k_sem_give(sem);
113}
114#include <syscalls/k_sem_give_mrsh.c>
Andrew Boie225e4c02017-10-12 09:54:26 -0700115#endif
Andrew Boiefc273c02017-09-23 12:51:23 -0700116
Andy Ross78327382020-03-05 15:18:14 -0800117int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400118{
Anas Nashif5076a832019-11-26 08:56:25 -0500119 int ret = 0;
120
Andy Ross78327382020-03-05 15:18:14 -0800121 __ASSERT(((arch_is_in_isr() == false) ||
122 K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400123
Andy Rossda37a532018-07-24 14:12:36 -0700124 k_spinlock_key_t key = k_spin_lock(&lock);
Torbjörn Leksellfcf2fb62021-03-26 09:51:03 +0100125
126 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, take, sem, timeout);
127
Adithya Baglody4b066212018-10-16 11:59:12 +0530128 if (likely(sem->count > 0U)) {
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400129 sem->count--;
Andy Rossda37a532018-07-24 14:12:36 -0700130 k_spin_unlock(&lock, key);
Anas Nashif5076a832019-11-26 08:56:25 -0500131 ret = 0;
132 goto out;
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400133 }
134
Andy Ross78327382020-03-05 15:18:14 -0800135 if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
Andy Rossda37a532018-07-24 14:12:36 -0700136 k_spin_unlock(&lock, key);
Anas Nashif5076a832019-11-26 08:56:25 -0500137 ret = -EBUSY;
138 goto out;
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400139 }
140
Torbjörn Leksellfcf2fb62021-03-26 09:51:03 +0100141 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_sem, take, sem, timeout);
142
Anas Nashif5076a832019-11-26 08:56:25 -0500143 ret = z_pend_curr(&lock, key, &sem->wait_q, timeout);
Anas Nashifb6304e62018-07-04 08:03:03 -0500144
Anas Nashif5076a832019-11-26 08:56:25 -0500145out:
Torbjörn Leksellfcf2fb62021-03-26 09:51:03 +0100146 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, take, sem, timeout, ret);
147
Andy Rossda37a532018-07-24 14:12:36 -0700148 return ret;
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400149}
Andrew Boiefc273c02017-09-23 12:51:23 -0700150
James Harris53b81792021-03-04 15:47:27 -0800151void z_impl_k_sem_reset(struct k_sem *sem)
152{
153 struct k_thread *thread;
154 k_spinlock_key_t key = k_spin_lock(&lock);
155
156 while (true) {
157 thread = z_unpend_first_thread(&sem->wait_q);
158 if (thread == NULL) {
159 break;
160 }
161 arch_thread_return_value_set(thread, -EAGAIN);
162 z_ready_thread(thread);
163 }
164 sem->count = 0;
Torbjörn Leksellfcf2fb62021-03-26 09:51:03 +0100165
166 SYS_PORT_TRACING_OBJ_FUNC(k_sem, reset, sem);
167
James Harris53b81792021-03-04 15:47:27 -0800168 handle_poll_events(sem);
169
170 z_reschedule(&lock, key);
171}
172
Andrew Boiefc273c02017-09-23 12:51:23 -0700173#ifdef CONFIG_USERSPACE
Andy Ross78327382020-03-05 15:18:14 -0800174static inline int z_vrfy_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
Andrew Boiefc273c02017-09-23 12:51:23 -0700175{
Andrew Boie8345e5e2018-05-04 15:57:57 -0700176 Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
Patrik Flykt4344e272019-03-08 14:19:05 -0700177 return z_impl_k_sem_take((struct k_sem *)sem, timeout);
Andrew Boiefc273c02017-09-23 12:51:23 -0700178}
Andy Ross65649742019-08-06 13:34:31 -0700179#include <syscalls/k_sem_take_mrsh.c>
Andrew Boiefc273c02017-09-23 12:51:23 -0700180
Andy Ross65649742019-08-06 13:34:31 -0700181static inline void z_vrfy_k_sem_reset(struct k_sem *sem)
182{
183 Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
184 z_impl_k_sem_reset(sem);
185}
186#include <syscalls/k_sem_reset_mrsh.c>
187
/* Syscall verification wrapper for k_sem_count_get(): validates that
 * @a sem is an initialized semaphore object the caller may access,
 * oopsing the caller on failure, then returns the current count.
 */
static inline unsigned int z_vrfy_k_sem_count_get(struct k_sem *sem)
{
	Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
	return z_impl_k_sem_count_get(sem);
}
193#include <syscalls/k_sem_count_get_mrsh.c>
194
Andrew Boie225e4c02017-10-12 09:54:26 -0700195#endif