/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <posix/pthread.h>

int64_t timespec_to_timeoutms(const struct timespec *abstime);

static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut,
		     k_timeout_t timeout)
{
	__ASSERT(mut->lock_count == 1U, "");

	int ret, key = irq_lock();

	/* Atomically (under irq_lock) release the mutex, wake one
	 * thread waiting to acquire it, and pend the current thread
	 * on the condition variable until signaled or timed out.
	 */
	mut->lock_count = 0U;
	mut->owner = NULL;
	_ready_one_thread(&mut->wait_q);
	ret = z_pend_curr_irqlock(key, &cv->wait_q, timeout);

	/* FIXME: this extra lock (and the potential context switch it
	 * can cause) could be optimized out. At the point of the
	 * signal/broadcast, it's possible to detect whether or not we
	 * will be swapping back to this particular thread and lock it
	 * (i.e. leave the lock variable unchanged) on our behalf.
	 * But that requires putting scheduler intelligence into this
	 * higher level abstraction and is probably not worth it.
	 */
	pthread_mutex_lock(mut);

	return ret == -EAGAIN ? ETIMEDOUT : ret;
}
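
/* Minimal usage sketch (illustrative only, not part of this file's
 * code): callers of pthread_cond_wait()/pthread_cond_timedwait() hold
 * the mutex and re-check their predicate in a loop, since by the time
 * pthread_mutex_lock() above returns another thread may already have
 * consumed the state change.  All names below (example_lock,
 * example_cond, example_ready, ...) are hypothetical.
 */
#if 0
static pthread_mutex_t example_lock;
static pthread_cond_t example_cond;
static int example_ready;

static void example_init(void)
{
	pthread_mutex_init(&example_lock, NULL);
	pthread_cond_init(&example_cond, NULL);
}

static void example_consume(void)
{
	pthread_mutex_lock(&example_lock);
	while (!example_ready) {
		/* Releases example_lock, pends on example_cond, and
		 * reacquires the lock before returning, exactly as
		 * cond_wait() does above.
		 */
		pthread_cond_wait(&example_cond, &example_lock);
	}
	example_ready = 0;
	pthread_mutex_unlock(&example_lock);
}

static void example_produce(void)
{
	pthread_mutex_lock(&example_lock);
	example_ready = 1;
	pthread_cond_signal(&example_cond);
	pthread_mutex_unlock(&example_lock);
}
#endif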

/* This implements a "fair" scheduling policy: at the end of a POSIX
 * thread call that might result in a change of the current maximum
 * priority thread, we always check and context switch if needed.
 * Note that there is significant dispute in the community over the
 * "right" way to do this and different systems do it differently by
 * default. Zephyr is an RTOS, so we choose latency over
 * throughput. See here for a good discussion of the broad issue:
 *
 * https://blog.mozilla.org/nfroyd/2017/03/29/on-mutex-performance-part-1/
 */

int pthread_cond_signal(pthread_cond_t *cv)
{
	int key = irq_lock();

	_ready_one_thread(&cv->wait_q);
	z_reschedule_irqlock(key);

	return 0;
}

int pthread_cond_broadcast(pthread_cond_t *cv)
{
	int key = irq_lock();

	/* Wake every thread pended on the condvar, then make a single
	 * reschedule decision.
	 */
	while (z_waitq_head(&cv->wait_q)) {
		_ready_one_thread(&cv->wait_q);
	}

	z_reschedule_irqlock(key);

	return 0;
}
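
/* Illustrative sketch (hypothetical names, not part of this file):
 * pthread_cond_broadcast() readies every pended waiter before making a
 * single reschedule decision, so a higher priority waiter may run
 * before the call returns -- the "latency over throughput" policy
 * noted above.  A typical wake-all use is opening a gate that many
 * waiters poll under the same mutex.
 */
#if 0
static pthread_mutex_t gate_lock;	/* initialized via pthread_mutex_init() */
static pthread_cond_t gate_cond;	/* initialized via pthread_cond_init() */
static int gate_open;

static void gate_wait(void)
{
	pthread_mutex_lock(&gate_lock);
	while (!gate_open) {
		pthread_cond_wait(&gate_cond, &gate_lock);
	}
	pthread_mutex_unlock(&gate_lock);
}

static void gate_release(void)
{
	pthread_mutex_lock(&gate_lock);
	gate_open = 1;
	/* Wake every thread currently pended in cond_wait() */
	pthread_cond_broadcast(&gate_cond);
	pthread_mutex_unlock(&gate_lock);
}
#endif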
72
73int pthread_cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut)
74{
75 return cond_wait(cv, mut, K_FOREVER);
76}
77
int pthread_cond_timedwait(pthread_cond_t *cv, pthread_mutex_t *mut,
			   const struct timespec *abstime)
{
	/* Convert the absolute deadline to a relative timeout in ms */
	int32_t timeout = (int32_t)timespec_to_timeoutms(abstime);

	return cond_wait(cv, mut, K_MSEC(timeout));
}
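
/* Illustrative sketch (hypothetical names): building the absolute
 * deadline passed to pthread_cond_timedwait().  The caller is assumed
 * to already hold *lock, as with pthread_cond_wait().  Which clock the
 * deadline is measured against is determined by
 * timespec_to_timeoutms(); CLOCK_MONOTONIC is assumed here.  Needs
 * <posix/time.h> for clock_gettime().
 */
#if 0
static int wait_with_timeout(pthread_cond_t *cond, pthread_mutex_t *lock,
			     int timeout_ms)
{
	struct timespec abstime;

	clock_gettime(CLOCK_MONOTONIC, &abstime);
	abstime.tv_sec += timeout_ms / 1000;
	abstime.tv_nsec += (timeout_ms % 1000) * 1000000;
	if (abstime.tv_nsec >= 1000000000) {
		abstime.tv_sec++;
		abstime.tv_nsec -= 1000000000;
	}

	/* Returns 0 when signaled, ETIMEDOUT if the deadline passes */
	return pthread_cond_timedwait(cond, lock, &abstime);
}
#endif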