freertos: fix for_at_least contract to add one tick
Fixes the FreeRTOS backends for pw::sync::Mutex,
pw::sync::BinarySemaphore, pw::sync::CountingSemaphore, and
pw::this_thread::sleep_for to add one tick when invoking the native
API to comply with the for_at_least contract as we do not know how
far we are into the current tick.
Note that this is not observable without the use of an independent
clock.
Change-Id: Ibae4fa8b0118470d55d42eebab7bfafc2985aba7
Reviewed-on: https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/36601
Reviewed-by: Wyatt Hepler <hepler@google.com>
Commit-Queue: Ewout van Bekkum <ewout@google.com>
diff --git a/pw_sync_freertos/binary_semaphore.cc b/pw_sync_freertos/binary_semaphore.cc
index 0d59a25..680eb11 100644
--- a/pw_sync_freertos/binary_semaphore.cc
+++ b/pw_sync_freertos/binary_semaphore.cc
@@ -24,28 +24,36 @@
#include "semphr.h"
using pw::chrono::SystemClock;
-using pw::chrono::freertos::kMaxTimeout;
namespace pw::sync {
namespace {
-static_assert(configSUPPORT_STATIC_ALLOCATION != 0);
+static_assert(configSUPPORT_STATIC_ALLOCATION != 0,
+ "FreeRTOS static allocations are required for this backend.");
} // namespace
bool BinarySemaphore::try_acquire_for(SystemClock::duration for_at_least) {
PW_DCHECK(!interrupt::InInterruptContext());
- // Clamp negative durations to be 0 which maps to non-blocking.
- for_at_least = std::max(for_at_least, SystemClock::duration::zero());
+ // Use non-blocking try_acquire for negative durations.
+ if (for_at_least < SystemClock::duration::zero()) {
+ return try_acquire();
+ }
- while (for_at_least > kMaxTimeout) {
- if (xSemaphoreTake(native_type_.handle, kMaxTimeout.count()) == pdTRUE) {
+ // On a tick based kernel we cannot tell how far along we are on the current
+ // tick, ergo we add one whole tick to the final duration.
+ constexpr SystemClock::duration kMaxTimeoutMinusOne =
+ pw::chrono::freertos::kMaxTimeout - SystemClock::duration(1);
+ while (for_at_least > kMaxTimeoutMinusOne) {
+ if (xSemaphoreTake(native_type_.handle, kMaxTimeoutMinusOne.count()) ==
+ pdTRUE) {
return true;
}
- for_at_least -= kMaxTimeout;
+ for_at_least -= kMaxTimeoutMinusOne;
}
- return xSemaphoreTake(native_type_.handle, for_at_least.count()) == pdTRUE;
+ return xSemaphoreTake(native_type_.handle, for_at_least.count() + 1) ==
+ pdTRUE;
}
} // namespace pw::sync
diff --git a/pw_sync_freertos/counting_semaphore.cc b/pw_sync_freertos/counting_semaphore.cc
index 33b2705..0ed7a10 100644
--- a/pw_sync_freertos/counting_semaphore.cc
+++ b/pw_sync_freertos/counting_semaphore.cc
@@ -24,7 +24,6 @@
#include "semphr.h"
using pw::chrono::SystemClock;
-using pw::chrono::freertos::kMaxTimeout;
namespace pw::sync {
namespace {
@@ -32,7 +31,8 @@
static_assert(configUSE_COUNTING_SEMAPHORES != 0,
"FreeRTOS counting semaphores aren't enabled.");
-static_assert(configSUPPORT_STATIC_ALLOCATION != 0);
+static_assert(configSUPPORT_STATIC_ALLOCATION != 0,
+ "FreeRTOS static allocations are required for this backend.");
} // namespace
@@ -55,15 +55,25 @@
bool CountingSemaphore::try_acquire_for(SystemClock::duration for_at_least) {
PW_DCHECK(!interrupt::InInterruptContext());
- while (for_at_least > kMaxTimeout) {
- if (xSemaphoreTake(native_type_.handle, kMaxTimeout.count()) == pdTRUE) {
+
+ // Use non-blocking try_acquire for negative durations.
+ if (for_at_least < SystemClock::duration::zero()) {
+ return try_acquire();
+ }
+
+ // On a tick based kernel we cannot tell how far along we are on the current
+ // tick, ergo we add one whole tick to the final duration.
+ constexpr SystemClock::duration kMaxTimeoutMinusOne =
+ pw::chrono::freertos::kMaxTimeout - SystemClock::duration(1);
+ while (for_at_least > kMaxTimeoutMinusOne) {
+ if (xSemaphoreTake(native_type_.handle, kMaxTimeoutMinusOne.count()) ==
+ pdTRUE) {
return true;
}
- for_at_least -= kMaxTimeout;
+ for_at_least -= kMaxTimeoutMinusOne;
}
- // Clamp negative durations to be 0 which maps to non-blocking.
- for_at_least = std::max(for_at_least, SystemClock::duration::zero());
- return xSemaphoreTake(native_type_.handle, for_at_least.count()) == pdTRUE;
+ return xSemaphoreTake(native_type_.handle, for_at_least.count() + 1) ==
+ pdTRUE;
}
} // namespace pw::sync
diff --git a/pw_sync_freertos/mutex.cc b/pw_sync_freertos/mutex.cc
index 52333a1..5ef6fc3 100644
--- a/pw_sync_freertos/mutex.cc
+++ b/pw_sync_freertos/mutex.cc
@@ -24,30 +24,38 @@
#include "semphr.h"
using pw::chrono::SystemClock;
-using pw::chrono::freertos::kMaxTimeout;
namespace pw::sync {
namespace {
static_assert(configUSE_MUTEXES != 0, "FreeRTOS mutexes aren't enabled.");
-static_assert(configSUPPORT_STATIC_ALLOCATION != 0);
+static_assert(configSUPPORT_STATIC_ALLOCATION != 0,
+ "FreeRTOS static allocations are required for this backend.");
} // namespace
bool Mutex::try_lock_for(SystemClock::duration for_at_least) {
PW_DCHECK(!interrupt::InInterruptContext());
- // Clamp negative durations to be 0 which maps to non-blocking.
- for_at_least = std::max(for_at_least, SystemClock::duration::zero());
+ // Use non-blocking try_lock for negative durations.
+ if (for_at_least < SystemClock::duration::zero()) {
+ return try_lock();
+ }
- while (for_at_least > kMaxTimeout) {
- if (xSemaphoreTake(native_type_.handle, kMaxTimeout.count()) == pdTRUE) {
+ // On a tick based kernel we cannot tell how far along we are on the current
+ // tick, ergo we add one whole tick to the final duration.
+ constexpr SystemClock::duration kMaxTimeoutMinusOne =
+ pw::chrono::freertos::kMaxTimeout - SystemClock::duration(1);
+ while (for_at_least > kMaxTimeoutMinusOne) {
+ if (xSemaphoreTake(native_type_.handle, kMaxTimeoutMinusOne.count()) ==
+ pdTRUE) {
return true;
}
- for_at_least -= kMaxTimeout;
+ for_at_least -= kMaxTimeoutMinusOne;
}
- return xSemaphoreTake(native_type_.handle, for_at_least.count()) == pdTRUE;
+ return xSemaphoreTake(native_type_.handle, for_at_least.count() + 1) ==
+ pdTRUE;
}
} // namespace pw::sync
diff --git a/pw_sync_freertos/public/pw_sync_freertos/binary_semaphore_inline.h b/pw_sync_freertos/public/pw_sync_freertos/binary_semaphore_inline.h
index d666080..1f0037f 100644
--- a/pw_sync_freertos/public/pw_sync_freertos/binary_semaphore_inline.h
+++ b/pw_sync_freertos/public/pw_sync_freertos/binary_semaphore_inline.h
@@ -76,6 +76,8 @@
inline bool BinarySemaphore::try_acquire_until(
chrono::SystemClock::time_point until_at_least) {
+ // Note that if this deadline is in the future, it will get rounded up by
+ // one whole tick due to how try_acquire_for is implemented.
return try_acquire_for(until_at_least - chrono::SystemClock::now());
}
diff --git a/pw_sync_freertos/public/pw_sync_freertos/counting_semaphore_inline.h b/pw_sync_freertos/public/pw_sync_freertos/counting_semaphore_inline.h
index 0dc116d..8fdf584 100644
--- a/pw_sync_freertos/public/pw_sync_freertos/counting_semaphore_inline.h
+++ b/pw_sync_freertos/public/pw_sync_freertos/counting_semaphore_inline.h
@@ -63,6 +63,8 @@
inline bool CountingSemaphore::try_acquire_until(
chrono::SystemClock::time_point until_at_least) {
+ // Note that if this deadline is in the future, it will get rounded up by
+ // one whole tick due to how try_acquire_for is implemented.
return try_acquire_for(until_at_least - chrono::SystemClock::now());
}
diff --git a/pw_sync_freertos/public/pw_sync_freertos/mutex_inline.h b/pw_sync_freertos/public/pw_sync_freertos/mutex_inline.h
index 4c8fe6c..31bcf14 100644
--- a/pw_sync_freertos/public/pw_sync_freertos/mutex_inline.h
+++ b/pw_sync_freertos/public/pw_sync_freertos/mutex_inline.h
@@ -52,6 +52,8 @@
inline bool Mutex::try_lock_until(
chrono::SystemClock::time_point until_at_least) {
+ // Note that if this deadline is in the future, it will get rounded up by
+ // one whole tick due to how try_lock_for is implemented.
return try_lock_for(until_at_least - chrono::SystemClock::now());
}
diff --git a/pw_thread_freertos/public/pw_thread_freertos/sleep_inline.h b/pw_thread_freertos/public/pw_thread_freertos/sleep_inline.h
index dfd08a8..f23fdc9 100644
--- a/pw_thread_freertos/public/pw_thread_freertos/sleep_inline.h
+++ b/pw_thread_freertos/public/pw_thread_freertos/sleep_inline.h
@@ -19,6 +19,8 @@
// TODO(ewout): Consider optional vTaskDelayUntil support to minimize slop.
inline void sleep_until(chrono::SystemClock::time_point until_at_least) {
+ // Note that if this deadline is in the future, it will get rounded up by
+ // one whole tick due to how sleep_for is implemented.
return sleep_for(until_at_least - chrono::SystemClock::now());
}
diff --git a/pw_thread_freertos/sleep.cc b/pw_thread_freertos/sleep.cc
index 88b7a5b..3f64ce1 100644
--- a/pw_thread_freertos/sleep.cc
+++ b/pw_thread_freertos/sleep.cc
@@ -23,21 +23,28 @@
#include "pw_thread/id.h"
#include "task.h"
-using pw::chrono::freertos::kMaxTimeout;
+using pw::chrono::SystemClock;
namespace pw::this_thread {
-void sleep_for(chrono::SystemClock::duration for_at_least) {
+void sleep_for(SystemClock::duration for_at_least) {
PW_DCHECK(get_id() != thread::Id());
- // Clamp negative durations to be 0 which maps to yield with vTaskDelay.
- for_at_least = std::max(for_at_least, chrono::SystemClock::duration::zero());
-
- while (for_at_least > kMaxTimeout) {
- vTaskDelay(kMaxTimeout.count());
- for_at_least -= kMaxTimeout;
+ // Yield for negative durations.
+ if (for_at_least < SystemClock::duration::zero()) {
+ taskYIELD();
+ return;
}
- vTaskDelay(for_at_least.count());
+
+ // On a tick based kernel we cannot tell how far along we are on the current
+ // tick, ergo we add one whole tick to the final duration.
+ constexpr SystemClock::duration kMaxTimeoutMinusOne =
+ pw::chrono::freertos::kMaxTimeout - SystemClock::duration(1);
+ while (for_at_least > kMaxTimeoutMinusOne) {
+ vTaskDelay(kMaxTimeoutMinusOne.count());
+ for_at_least -= kMaxTimeoutMinusOne;
+ }
+ vTaskDelay(for_at_least.count() + 1);
}
} // namespace pw::this_thread