/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <posix/pthread.h>

int64_t timespec_to_timeoutms(const struct timespec *abstime);

/* Atomically release the (singly-held) mutex, pend the current thread on
 * the condition variable's wait queue, and re-acquire the mutex before
 * returning.
 */
static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut,
		     k_timeout_t timeout)
{
	__ASSERT(mut->lock_count == 1U, "mutex must be locked exactly once by the caller");

	int ret, key = irq_lock();

	mut->lock_count = 0U;
	mut->owner = NULL;
	_ready_one_thread(&mut->wait_q);
	ret = z_pend_curr_irqlock(key, &cv->wait_q, timeout);

	/* FIXME: this extra lock (and the potential context switch it
	 * can cause) could be optimized out.  At the point of the
	 * signal/broadcast, it's possible to detect whether or not we
	 * will be swapping back to this particular thread and lock it
	 * (i.e. leave the lock variable unchanged) on our behalf.
	 * But that requires putting scheduler intelligence into this
	 * higher level abstraction and is probably not worth it.
	 */
	pthread_mutex_lock(mut);

	return ret == -EAGAIN ? ETIMEDOUT : ret;
}
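
/* Illustrative usage sketch, not part of the implementation: the
 * canonical calling pattern that cond_wait() above supports.  A waiter
 * holds the mutex and re-checks its predicate in a loop (another
 * consumer may win the race after a signal), and a producer changes the
 * predicate while holding the same mutex before signalling.  The
 * example_* names and the CONFIG_EXAMPLE_COND_USAGE guard are
 * hypothetical, and NULL (default) attributes are assumed to be accepted
 * by the init calls.
 */
#ifdef CONFIG_EXAMPLE_COND_USAGE
#include <stdbool.h>

static pthread_mutex_t example_lock;
static pthread_cond_t example_cond;
static bool example_ready;

static void example_init(void)
{
	pthread_mutex_init(&example_lock, NULL);
	pthread_cond_init(&example_cond, NULL);
}

static void example_consumer(void)
{
	pthread_mutex_lock(&example_lock);
	while (!example_ready) {
		/* Releases example_lock while pended, re-acquires on wakeup */
		pthread_cond_wait(&example_cond, &example_lock);
	}
	example_ready = false;
	pthread_mutex_unlock(&example_lock);
}

static void example_producer(void)
{
	pthread_mutex_lock(&example_lock);
	example_ready = true;
	pthread_cond_signal(&example_cond);
	pthread_mutex_unlock(&example_lock);
}
#endif /* CONFIG_EXAMPLE_COND_USAGE */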

/* This implements a "fair" scheduling policy: at the end of a POSIX
 * thread call that might result in a change of the current maximum
 * priority thread, we always check and context switch if needed.
 * Note that there is significant dispute in the community over the
 * "right" way to do this and different systems do it differently by
 * default.  Zephyr is an RTOS, so we choose latency over
 * throughput.  See here for a good discussion of the broad issue:
 *
 * https://blog.mozilla.org/nfroyd/2017/03/29/on-mutex-performance-part-1/
 */

int pthread_cond_signal(pthread_cond_t *cv)
{
	int key = irq_lock();

	_ready_one_thread(&cv->wait_q);
	z_reschedule_irqlock(key);

	return 0;
}

int pthread_cond_broadcast(pthread_cond_t *cv)
{
	int key = irq_lock();

	while (z_waitq_head(&cv->wait_q)) {
		_ready_one_thread(&cv->wait_q);
	}

	z_reschedule_irqlock(key);

	return 0;
}

int pthread_cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut)
{
	return cond_wait(cv, mut, K_FOREVER);
}

int pthread_cond_timedwait(pthread_cond_t *cv, pthread_mutex_t *mut,
			   const struct timespec *abstime)
{
	int32_t timeout = (int32_t)timespec_to_timeoutms(abstime);

	return cond_wait(cv, mut, K_MSEC(timeout));
}
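
/* Illustrative usage sketch, not part of the implementation: building
 * the absolute timeout passed to pthread_cond_timedwait().  The abstime
 * is turned into a relative millisecond timeout by
 * timespec_to_timeoutms(), so it must be expressed against the clock
 * that helper reads (assumed here to be the monotonic clock; check the
 * helper if the choice of CLOCK_MONOTONIC vs. CLOCK_REALTIME matters).
 * The example_* names and the CONFIG_EXAMPLE_COND_USAGE guard are
 * hypothetical.
 */
#ifdef CONFIG_EXAMPLE_COND_USAGE
#include <posix/time.h>	/* clock_gettime(); assumed header path in this tree */

static int example_wait_up_to_ms(pthread_cond_t *cv, pthread_mutex_t *mut,
				 uint32_t ms)
{
	struct timespec abstime;

	clock_gettime(CLOCK_MONOTONIC, &abstime);
	abstime.tv_sec += ms / 1000U;
	abstime.tv_nsec += (ms % 1000U) * 1000000;
	if (abstime.tv_nsec >= 1000000000) {
		abstime.tv_sec += 1;
		abstime.tv_nsec -= 1000000000;
	}

	/* Caller must already hold *mut; returns 0 or ETIMEDOUT */
	return pthread_cond_timedwait(cv, mut, &abstime);
}
#endif /* CONFIG_EXAMPLE_COND_USAGE */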