pw_sync_backends: Expand comments in the implementations

Expands comments in the backend implementations to better explain
why things are done the way they are, as a reference for future
backends.

Also updates IRQ contract asserts to consistently use debug asserts.
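
For reference, all of these backends share the same bounded-wait
pattern in try_acquire_for/try_lock_for. The sketch below is a
consolidation, not code from any single backend: NativeWait and
NativeTicks are stand-ins for each RTOS's native blocking call and
tick type (OS_WaitCSemaTimed/OS_TIME, xSemaphoreTake/TickType_t,
tx_semaphore_get/ULONG).

  bool TryAcquireFor(SystemClock::duration timeout) {
    // Wait in chunks of kMaxTimeout - 1 ticks so that the final wait,
    // which adds one tick to cover the partially elapsed current tick,
    // still fits in what the native API can express.
    constexpr SystemClock::duration kMaxTimeoutMinusOne =
        kMaxTimeout - SystemClock::duration(1);
    while (timeout > kMaxTimeoutMinusOne) {
      if (NativeWait(static_cast<NativeTicks>(kMaxTimeoutMinusOne.count()))) {
        return true;  // Acquired before this intermediate deadline.
      }
      timeout -= kMaxTimeoutMinusOne;
    }
    // Here timeout <= kMaxTimeout - 1, so timeout + 1 <= kMaxTimeout.
    return NativeWait(static_cast<NativeTicks>(timeout.count() + 1));
  }

The IRQ contract asserts use PW_DASSERT/PW_DCHECK rather than
PW_ASSERT/PW_CHECK so that the interrupt context check can be compiled
out of release builds where debug assertions are disabled.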

Change-Id: Ie00332826f8fcbeeac4afcf4b757008ab74313fb
Reviewed-on: https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/67303
Commit-Queue: Ewout van Bekkum <ewout@google.com>
Pigweed-Auto-Submit: Ewout van Bekkum <ewout@google.com>
Reviewed-by: Keir Mierle <keir@google.com>
diff --git a/pw_sync_embos/binary_semaphore.cc b/pw_sync_embos/binary_semaphore.cc
index ecacd4c..9c1ce91 100644
--- a/pw_sync_embos/binary_semaphore.cc
+++ b/pw_sync_embos/binary_semaphore.cc
@@ -27,6 +27,7 @@
 namespace pw::sync {
 
 bool BinarySemaphore::try_acquire_for(SystemClock::duration timeout) {
+  // Enforce the pw::sync::BinarySemaphore IRQ contract.
   PW_DCHECK(!interrupt::InInterruptContext());
 
   // Use non-blocking try_acquire for negative and zero length durations.
@@ -34,8 +35,11 @@
     return try_acquire();
   }
 
-  // On a tick based kernel we cannot tell how far along we are on the current
-  // tick, ergo we add one whole tick to the final duration.
+  // In case the timeout is too long for us to express through the native
+  // embOS API, we repeatedly wait with shorter durations. Note that on a tick
+  // based kernel we cannot tell how far along we are on the current tick, ergo
+  // we add one whole tick to the final duration. However, this also means that
+  // the loop must ensure that timeout + 1 does not exceed the max timeout.
   constexpr SystemClock::duration kMaxTimeoutMinusOne =
       pw::chrono::embos::kMaxTimeout - SystemClock::duration(1);
   while (timeout > kMaxTimeoutMinusOne) {
@@ -45,6 +49,9 @@
     }
     timeout -= kMaxTimeoutMinusOne;
   }
+
+  // On a tick based kernel we cannot tell how far along we are on the current
+  // tick, ergo we add one whole tick to the final duration.
   return OS_WaitCSemaTimed(&native_type_,
                            static_cast<OS_TIME>(timeout.count() + 1));
 }
diff --git a/pw_sync_embos/counting_semaphore.cc b/pw_sync_embos/counting_semaphore.cc
index 6390426..edd4951 100644
--- a/pw_sync_embos/counting_semaphore.cc
+++ b/pw_sync_embos/counting_semaphore.cc
@@ -41,6 +41,7 @@
 }
 
 bool CountingSemaphore::try_acquire_for(SystemClock::duration timeout) {
+  // Enforce the pw::sync::CountingSemaphore IRQ contract.
   PW_DCHECK(!interrupt::InInterruptContext());
 
   // Use non-blocking try_acquire for negative and zero length durations.
@@ -48,8 +49,11 @@
     return try_acquire();
   }
 
-  // On a tick based kernel we cannot tell how far along we are on the current
-  // tick, ergo we add one whole tick to the final duration.
+  // In case the timeout is too long for us to express through the native
+  // embOS API, we repeatedly wait with shorter durations. Note that on a tick
+  // based kernel we cannot tell how far along we are on the current tick, ergo
+  // we add one whole tick to the final duration. However, this also means that
+  // the loop must ensure that timeout + 1 does not exceed the max timeout.
   constexpr SystemClock::duration kMaxTimeoutMinusOne =
       pw::chrono::embos::kMaxTimeout - SystemClock::duration(1);
   while (timeout > kMaxTimeoutMinusOne) {
@@ -59,6 +63,8 @@
     }
     timeout -= kMaxTimeoutMinusOne;
   }
+  // On a tick based kernel we cannot tell how far along we are on the current
+  // tick, ergo we add one whole tick to the final duration.
   return OS_WaitCSemaTimed(&native_type_,
                            static_cast<OS_TIME>(timeout.count() + 1));
 }
diff --git a/pw_sync_embos/public/pw_sync_embos/binary_semaphore_inline.h b/pw_sync_embos/public/pw_sync_embos/binary_semaphore_inline.h
index 69fb4f7..943b65e 100644
--- a/pw_sync_embos/public/pw_sync_embos/binary_semaphore_inline.h
+++ b/pw_sync_embos/public/pw_sync_embos/binary_semaphore_inline.h
@@ -31,7 +31,8 @@
 inline void BinarySemaphore::release() { OS_SignalCSemaMax(&native_type_, 1); }
 
 inline void BinarySemaphore::acquire() {
-  PW_ASSERT(!interrupt::InInterruptContext());
+  // Enforce the pw::sync::BinarySemaphore IRQ contract.
+  PW_DASSERT(!interrupt::InInterruptContext());
   OS_WaitCSema(&native_type_);
 }
 
diff --git a/pw_sync_embos/public/pw_sync_embos/counting_semaphore_inline.h b/pw_sync_embos/public/pw_sync_embos/counting_semaphore_inline.h
index 15ac132..8f29d4e 100644
--- a/pw_sync_embos/public/pw_sync_embos/counting_semaphore_inline.h
+++ b/pw_sync_embos/public/pw_sync_embos/counting_semaphore_inline.h
@@ -31,7 +31,8 @@
 }
 
 inline void CountingSemaphore::acquire() {
-  PW_ASSERT(!interrupt::InInterruptContext());
+  // Enforce the pw::sync::CountingSemaphore IRQ contract.
+  PW_DASSERT(!interrupt::InInterruptContext());
   OS_WaitCSema(&native_type_);
 }
 
diff --git a/pw_sync_embos/public/pw_sync_embos/mutex_inline.h b/pw_sync_embos/public/pw_sync_embos/mutex_inline.h
index d902f6b..68c94bc 100644
--- a/pw_sync_embos/public/pw_sync_embos/mutex_inline.h
+++ b/pw_sync_embos/public/pw_sync_embos/mutex_inline.h
@@ -25,18 +25,21 @@
 inline Mutex::~Mutex() { OS_DeleteRSema(&native_type_); }
 
 inline void Mutex::lock() {
-  PW_ASSERT(!interrupt::InInterruptContext());
+  // Enforce the pw::sync::Mutex IRQ contract.
+  PW_DASSERT(!interrupt::InInterruptContext());
   const int lock_count = OS_Use(&native_type_);
   PW_ASSERT(lock_count == 1);  // Recursive locking is not permitted.
 }
 
 inline bool Mutex::try_lock() {
-  PW_ASSERT(!interrupt::InInterruptContext());
+  // Enforce the pw::sync::Mutex IRQ contract.
+  PW_DASSERT(!interrupt::InInterruptContext());
   return OS_Request(&native_type_) != 0;
 }
 
 inline void Mutex::unlock() {
-  PW_ASSERT(!interrupt::InInterruptContext());
+  // Enforce the pw::sync::Mutex IRQ contract.
+  PW_DASSERT(!interrupt::InInterruptContext());
   OS_Unuse(&native_type_);
 }
 
diff --git a/pw_sync_embos/timed_mutex.cc b/pw_sync_embos/timed_mutex.cc
index 4392ab5..92cac1b 100644
--- a/pw_sync_embos/timed_mutex.cc
+++ b/pw_sync_embos/timed_mutex.cc
@@ -27,6 +27,7 @@
 namespace pw::sync {
 
 bool TimedMutex::try_lock_for(SystemClock::duration timeout) {
+  // Enforce the pw::sync::TimedMutex IRQ contract.
   PW_DCHECK(!interrupt::InInterruptContext());
 
   // Use non-blocking try_lock for negative and zero length durations.
@@ -34,8 +35,11 @@
     return try_lock();
   }
 
-  // On a tick based kernel we cannot tell how far along we are on the current
-  // tick, ergo we add one whole tick to the final duration.
+  // In case the timeout is too long for us to express through the native
+  // embOS API, we repeatedly wait with shorter durations. Note that on a tick
+  // based kernel we cannot tell how far along we are on the current tick, ergo
+  // we add one whole tick to the final duration. However, this also means that
+  // the loop must ensure that timeout + 1 does not exceed the max timeout.
   constexpr SystemClock::duration kMaxTimeoutMinusOne =
       pw::chrono::embos::kMaxTimeout - SystemClock::duration(1);
   while (timeout > kMaxTimeoutMinusOne) {
@@ -47,6 +51,8 @@
     }
     timeout -= kMaxTimeoutMinusOne;
   }
+  // On a tick based kernel we cannot tell how far along we are on the current
+  // tick, ergo we add one whole tick to the final duration.
   const int lock_count =
       OS_UseTimed(&native_handle(), static_cast<OS_TIME>(timeout.count() + 1));
   PW_CHECK_UINT_LE(1, lock_count, "Recursive locking is not permitted");
diff --git a/pw_sync_freertos/binary_semaphore.cc b/pw_sync_freertos/binary_semaphore.cc
index 39b300d..9b27d5b 100644
--- a/pw_sync_freertos/binary_semaphore.cc
+++ b/pw_sync_freertos/binary_semaphore.cc
@@ -34,6 +34,7 @@
 }  // namespace
 
 bool BinarySemaphore::try_acquire_for(SystemClock::duration timeout) {
+  // Enforce the pw::sync::BinarySemaphore IRQ contract.
   PW_DCHECK(!interrupt::InInterruptContext());
 
   // Use non-blocking try_acquire for negative and zero length durations.
@@ -41,8 +42,11 @@
     return try_acquire();
   }
 
-  // On a tick based kernel we cannot tell how far along we are on the current
-  // tick, ergo we add one whole tick to the final duration.
+  // In case the timeout is too long for us to express through the native
+  // FreeRTOS API, we repeatedly wait with shorter durations. Note that on a
+  // tick based kernel we cannot tell how far along we are on the current tick,
+  // ergo we add one whole tick to the final duration. However, this also means
+  // that the loop must ensure that timeout + 1 does not exceed the max timeout.
   constexpr SystemClock::duration kMaxTimeoutMinusOne =
       pw::chrono::freertos::kMaxTimeout - SystemClock::duration(1);
   while (timeout > kMaxTimeoutMinusOne) {
@@ -53,6 +57,8 @@
     }
     timeout -= kMaxTimeoutMinusOne;
   }
+  // On a tick based kernel we cannot tell how far along we are on the current
+  // tick, ergo we add one whole tick to the final duration.
   return xSemaphoreTake(reinterpret_cast<SemaphoreHandle_t>(&native_type_),
                         static_cast<TickType_t>(timeout.count() + 1)) == pdTRUE;
 }
diff --git a/pw_sync_freertos/counting_semaphore.cc b/pw_sync_freertos/counting_semaphore.cc
index 06e3255..8845cb0 100644
--- a/pw_sync_freertos/counting_semaphore.cc
+++ b/pw_sync_freertos/counting_semaphore.cc
@@ -56,6 +56,7 @@
 }
 
 bool CountingSemaphore::try_acquire_for(SystemClock::duration timeout) {
+  // Enforce the pw::sync::CountingSemaphore IRQ contract.
   PW_DCHECK(!interrupt::InInterruptContext());
 
   // Use non-blocking try_acquire for negative and zero length durations.
@@ -63,8 +64,11 @@
     return try_acquire();
   }
 
-  // On a tick based kernel we cannot tell how far along we are on the current
-  // tick, ergo we add one whole tick to the final duration.
+  // In case the timeout is too long for us to express through the native
+  // FreeRTOS API, we repeatedly wait with shorter durations. Note that on a
+  // tick based kernel we cannot tell how far along we are on the current tick,
+  // ergo we add one whole tick to the final duration. However, this also means
+  // that the loop must ensure that timeout + 1 does not exceed the max timeout.
   constexpr SystemClock::duration kMaxTimeoutMinusOne =
       pw::chrono::freertos::kMaxTimeout - SystemClock::duration(1);
   while (timeout > kMaxTimeoutMinusOne) {
@@ -75,6 +79,8 @@
     }
     timeout -= kMaxTimeoutMinusOne;
   }
+  // On a tick based kernel we cannot tell how far along we are on the current
+  // tick, ergo we add one whole tick to the final duration.
   return xSemaphoreTake(reinterpret_cast<SemaphoreHandle_t>(&native_type_),
                         static_cast<TickType_t>(timeout.count() + 1)) == pdTRUE;
 }
diff --git a/pw_sync_freertos/public/pw_sync_freertos/binary_semaphore_inline.h b/pw_sync_freertos/public/pw_sync_freertos/binary_semaphore_inline.h
index 762da43..e72ff8e 100644
--- a/pw_sync_freertos/public/pw_sync_freertos/binary_semaphore_inline.h
+++ b/pw_sync_freertos/public/pw_sync_freertos/binary_semaphore_inline.h
@@ -47,7 +47,8 @@
 }
 
 inline void BinarySemaphore::acquire() {
-  PW_ASSERT(!interrupt::InInterruptContext());
+  // Enforce the pw::sync::BinarySemaphore IRQ contract.
+  PW_DASSERT(!interrupt::InInterruptContext());
 #if INCLUDE_vTaskSuspend == 1  // This means portMAX_DELAY is indefinite.
   const BaseType_t result = xSemaphoreTake(
       reinterpret_cast<SemaphoreHandle_t>(&native_type_), portMAX_DELAY);
diff --git a/pw_sync_freertos/public/pw_sync_freertos/counting_semaphore_inline.h b/pw_sync_freertos/public/pw_sync_freertos/counting_semaphore_inline.h
index d7a1bdb..809ddd8 100644
--- a/pw_sync_freertos/public/pw_sync_freertos/counting_semaphore_inline.h
+++ b/pw_sync_freertos/public/pw_sync_freertos/counting_semaphore_inline.h
@@ -36,7 +36,8 @@
 }
 
 inline void CountingSemaphore::acquire() {
-  PW_ASSERT(!interrupt::InInterruptContext());
+  // Enforce the pw::sync::CountingSemaphore IRQ contract.
+  PW_DASSERT(!interrupt::InInterruptContext());
 #if INCLUDE_vTaskSuspend == 1  // This means portMAX_DELAY is indefinite.
   const BaseType_t result = xSemaphoreTake(
       reinterpret_cast<SemaphoreHandle_t>(&native_type_), portMAX_DELAY);
diff --git a/pw_sync_freertos/public/pw_sync_freertos/mutex_inline.h b/pw_sync_freertos/public/pw_sync_freertos/mutex_inline.h
index 76709a4..a153ed5 100644
--- a/pw_sync_freertos/public/pw_sync_freertos/mutex_inline.h
+++ b/pw_sync_freertos/public/pw_sync_freertos/mutex_inline.h
@@ -41,7 +41,8 @@
 }
 
 inline void Mutex::lock() {
-  PW_ASSERT(!interrupt::InInterruptContext());
+  // Enforce the pw::sync::Mutex IRQ contract.
+  PW_DASSERT(!interrupt::InInterruptContext());
 #if INCLUDE_vTaskSuspend == 1  // This means portMAX_DELAY is indefinite.
   const BaseType_t result = xSemaphoreTake(
       reinterpret_cast<SemaphoreHandle_t>(&native_type_), portMAX_DELAY);
@@ -56,13 +57,15 @@
 }
 
 inline bool Mutex::try_lock() {
-  PW_ASSERT(!interrupt::InInterruptContext());
+  // Enforce the pw::sync::Mutex IRQ contract.
+  PW_DASSERT(!interrupt::InInterruptContext());
   return xSemaphoreTake(reinterpret_cast<SemaphoreHandle_t>(&native_type_),
                         0) == pdTRUE;
 }
 
 inline void Mutex::unlock() {
-  PW_ASSERT(!interrupt::InInterruptContext());
+  // Enforce the pw::sync::Mutex IRQ contract.
+  PW_DASSERT(!interrupt::InInterruptContext());
   // Unlocking only fails if it was not locked first.
   PW_ASSERT(xSemaphoreGive(
                 reinterpret_cast<SemaphoreHandle_t>(&native_type_)) == pdTRUE);
diff --git a/pw_sync_freertos/public/pw_sync_freertos/thread_notification_inline.h b/pw_sync_freertos/public/pw_sync_freertos/thread_notification_inline.h
index 2f4fad8..cf45f21 100644
--- a/pw_sync_freertos/public/pw_sync_freertos/thread_notification_inline.h
+++ b/pw_sync_freertos/public/pw_sync_freertos/thread_notification_inline.h
@@ -36,6 +36,7 @@
 inline ThreadNotification::~ThreadNotification() = default;
 
 inline bool ThreadNotification::try_acquire() {
+  // Enforce the pw::sync::ThreadNotification IRQ contract.
   PW_DASSERT(!interrupt::InInterruptContext());
   taskENTER_CRITICAL();
   const bool notified = native_type_.notified;
diff --git a/pw_sync_freertos/thread_notification.cc b/pw_sync_freertos/thread_notification.cc
index bb1bc8b..0c11b61 100644
--- a/pw_sync_freertos/thread_notification.cc
+++ b/pw_sync_freertos/thread_notification.cc
@@ -42,7 +42,10 @@
 }  // namespace
 
 void ThreadNotification::acquire() {
+  // Enforce the pw::sync::ThreadNotification IRQ contract.
   PW_DCHECK(!interrupt::InInterruptContext());
+
+  // Enforce that only a single thread can block at a time.
   PW_DCHECK(native_type_.blocked_thread == nullptr);
 
   taskENTER_CRITICAL();
diff --git a/pw_sync_freertos/timed_mutex.cc b/pw_sync_freertos/timed_mutex.cc
index 0e860a0..897f7ba 100644
--- a/pw_sync_freertos/timed_mutex.cc
+++ b/pw_sync_freertos/timed_mutex.cc
@@ -28,6 +28,7 @@
 namespace pw::sync {
 
 bool TimedMutex::try_lock_for(SystemClock::duration timeout) {
+  // Enforce the pw::sync::TimedMutex IRQ contract.
   PW_DCHECK(!interrupt::InInterruptContext());
 
   // Use non-blocking try_lock for negative and zero length durations.
@@ -35,8 +36,11 @@
     return try_lock();
   }
 
-  // On a tick based kernel we cannot tell how far along we are on the current
-  // tick, ergo we add one whole tick to the final duration.
+  // In case the timeout is too long for us to express through the native
+  // FreeRTOS API, we repeatedly wait with shorter durations. Note that on a
+  // tick based kernel we cannot tell how far along we are on the current tick,
+  // ergo we add one whole tick to the final duration. However, this also means
+  // that the loop must ensure that timeout + 1 does not exceed the max timeout.
   constexpr SystemClock::duration kMaxTimeoutMinusOne =
       pw::chrono::freertos::kMaxTimeout - SystemClock::duration(1);
   while (timeout > kMaxTimeoutMinusOne) {
@@ -47,6 +51,8 @@
     }
     timeout -= kMaxTimeoutMinusOne;
   }
+  // On a tick based kernel we cannot tell how far along we are on the current
+  // tick, ergo we add one whole tick to the final duration.
   return xSemaphoreTake(reinterpret_cast<SemaphoreHandle_t>(&native_handle()),
                         static_cast<TickType_t>(timeout.count() + 1)) == pdTRUE;
 }
diff --git a/pw_sync_freertos/timed_thread_notification.cc b/pw_sync_freertos/timed_thread_notification.cc
index b248bf3..e78ae8b 100644
--- a/pw_sync_freertos/timed_thread_notification.cc
+++ b/pw_sync_freertos/timed_thread_notification.cc
@@ -46,7 +46,10 @@
 }  // namespace
 
 bool TimedThreadNotification::try_acquire_for(SystemClock::duration timeout) {
+  // Enforce the pw::sync::TimedThreadNotification IRQ contract.
   PW_DCHECK(!interrupt::InInterruptContext());
+
+  // Enforce that only a single thread can block at a time.
   PW_DCHECK(native_handle().blocked_thread == nullptr);
 
   taskENTER_CRITICAL();
@@ -64,8 +67,12 @@
   taskEXIT_CRITICAL();
 
   const bool notified = [&]() {
-    // On a tick based kernel we cannot tell how far along we are on the current
-    // tick, ergo we add one whole tick to the final duration.
+    // In case the timeout is too long for us to express through the native
+    // FreeRTOS API, we repeatedly wait with shorter durations. Note that on a
+    // tick based kernel we cannot tell how far along we are on the current
+    // tick, ergo we add one whole tick to the final duration. However, this
+    // also means that the loop must ensure that timeout + 1 does not exceed
+    // the max timeout.
     constexpr SystemClock::duration kMaxTimeoutMinusOne =
         pw::chrono::freertos::kMaxTimeout - SystemClock::duration(1);
     // In case the timeout is too long for us to express through the native
@@ -78,6 +85,8 @@
       timeout -= kMaxTimeoutMinusOne;
     }
 
+    // On a tick based kernel we cannot tell how far along we are on the current
+    // tick, ergo we add one whole tick to the final duration.
     return WaitForNotification(static_cast<TickType_t>(timeout.count() + 1)) ==
            pdTRUE;
   }();
diff --git a/pw_sync_threadx/binary_semaphore.cc b/pw_sync_threadx/binary_semaphore.cc
index 792ac96..18c3ad7 100644
--- a/pw_sync_threadx/binary_semaphore.cc
+++ b/pw_sync_threadx/binary_semaphore.cc
@@ -35,8 +35,11 @@
     return try_acquire();
   }
 
-  // On a tick based kernel we cannot tell how far along we are on the current
-  // tick, ergo we add one whole tick to the final duration.
+  // In case the timeout is too long for us to express through the native
+  // ThreadX API, we repeatedly wait with shorter durations. Note that on a tick
+  // based kernel we cannot tell how far along we are on the current tick, ergo
+  // we add one whole tick to the final duration. However, this also means that
+  // the loop must ensure that timeout + 1 does not exceed the max timeout.
   constexpr SystemClock::duration kMaxTimeoutMinusOne =
       pw::chrono::threadx::kMaxTimeout - SystemClock::duration(1);
   while (timeout > kMaxTimeoutMinusOne) {
@@ -49,6 +52,8 @@
     }
     timeout -= kMaxTimeoutMinusOne;
   }
+  // On a tick based kernel we cannot tell how far along we are on the current
+  // tick, ergo we add one whole tick to the final duration.
   const UINT result =
       tx_semaphore_get(&native_type_, static_cast<ULONG>(timeout.count() + 1));
   if (result == TX_NO_INSTANCE) {
diff --git a/pw_sync_threadx/counting_semaphore.cc b/pw_sync_threadx/counting_semaphore.cc
index ddab800..1ac4c0a 100644
--- a/pw_sync_threadx/counting_semaphore.cc
+++ b/pw_sync_threadx/counting_semaphore.cc
@@ -35,8 +35,11 @@
     return try_acquire();
   }
 
-  // On a tick based kernel we cannot tell how far along we are on the current
-  // tick, ergo we add one whole tick to the final duration.
+  // In case the timeout is too long for us to express through the native
+  // ThreadX API, we repeatedly wait with shorter durations. Note that on a tick
+  // based kernel we cannot tell how far along we are on the current tick, ergo
+  // we add one whole tick to the final duration. However, this also means that
+  // the loop must ensure that timeout + 1 does not exceed the max timeout.
   constexpr SystemClock::duration kMaxTimeoutMinusOne =
       pw::chrono::threadx::kMaxTimeout - SystemClock::duration(1);
   while (timeout > kMaxTimeoutMinusOne) {
@@ -49,6 +52,8 @@
     }
     timeout -= kMaxTimeoutMinusOne;
   }
+  // On a tick based kernel we cannot tell how far along we are on the current
+  // tick, ergo we add one whole tick to the final duration.
   const UINT result =
       tx_semaphore_get(&native_type_, static_cast<ULONG>(timeout.count() + 1));
   if (result == TX_NO_INSTANCE) {
diff --git a/pw_sync_threadx/public/pw_sync_threadx/mutex_inline.h b/pw_sync_threadx/public/pw_sync_threadx/mutex_inline.h
index d8feabc..f983c0c 100644
--- a/pw_sync_threadx/public/pw_sync_threadx/mutex_inline.h
+++ b/pw_sync_threadx/public/pw_sync_threadx/mutex_inline.h
@@ -37,13 +37,13 @@
 
 inline void Mutex::lock() {
   // Enforce the pw::sync::Mutex IRQ contract.
-  PW_ASSERT(!interrupt::InInterruptContext());
+  PW_DASSERT(!interrupt::InInterruptContext());
   PW_ASSERT(tx_mutex_get(&native_type_, TX_WAIT_FOREVER) == TX_SUCCESS);
 }
 
 inline bool Mutex::try_lock() {
   // Enforce the pw::sync::Mutex IRQ contract.
-  PW_ASSERT(!interrupt::InInterruptContext());
+  PW_DASSERT(!interrupt::InInterruptContext());
   const UINT result = tx_mutex_get(&native_type_, TX_NO_WAIT);
   if (result == TX_NOT_AVAILABLE) {
     return false;
@@ -54,7 +54,7 @@
 
 inline void Mutex::unlock() {
   // Enforce the pw::sync::Mutex IRQ contract.
-  PW_ASSERT(!interrupt::InInterruptContext());
+  PW_DASSERT(!interrupt::InInterruptContext());
   PW_ASSERT(tx_mutex_put(&native_type_) == TX_SUCCESS);
 }
 
diff --git a/pw_sync_threadx/public/pw_sync_threadx/thread_notification_inline.h b/pw_sync_threadx/public/pw_sync_threadx/thread_notification_inline.h
index 2631a23..db2f5a8 100644
--- a/pw_sync_threadx/public/pw_sync_threadx/thread_notification_inline.h
+++ b/pw_sync_threadx/public/pw_sync_threadx/thread_notification_inline.h
@@ -31,6 +31,7 @@
 inline ThreadNotification::~ThreadNotification() = default;
 
 inline bool ThreadNotification::try_acquire() {
+  // Enforce the pw::sync::ThreadNotification IRQ contract.
   PW_DASSERT(!interrupt::InInterruptContext());
   {
     std::lock_guard lock(backend::thread_notification_isl);
diff --git a/pw_sync_threadx/thread_notification.cc b/pw_sync_threadx/thread_notification.cc
index 96f07fa..b9c6de8 100644
--- a/pw_sync_threadx/thread_notification.cc
+++ b/pw_sync_threadx/thread_notification.cc
@@ -29,7 +29,10 @@
 }  // namespace backend
 
 void ThreadNotification::acquire() {
+  // Enforce the pw::sync::ThreadNotification IRQ contract.
   PW_DCHECK(!interrupt::InInterruptContext());
+
+  // Enforce that only a single thread can block at a time.
   PW_DCHECK(native_type_.blocked_thread == nullptr);
 
   {
diff --git a/pw_sync_threadx/timed_mutex.cc b/pw_sync_threadx/timed_mutex.cc
index aa0413e..ead0f1b 100644
--- a/pw_sync_threadx/timed_mutex.cc
+++ b/pw_sync_threadx/timed_mutex.cc
@@ -35,8 +35,11 @@
     return try_lock();
   }
 
-  // On a tick based kernel we cannot tell how far along we are on the current
-  // tick, ergo we add one whole tick to the final duration.
+  // In case the timeout is too long for us to express through the native
+  // ThreadX API, we repeatedly wait with shorter durations. Note that on a tick
+  // based kernel we cannot tell how far along we are on the current tick, ergo
+  // we add one whole tick to the final duration. However, this also means that
+  // the loop must ensure that timeout + 1 does not exceed the max timeout.
   constexpr SystemClock::duration kMaxTimeoutMinusOne =
       pw::chrono::threadx::kMaxTimeout - SystemClock::duration(1);
   while (timeout > kMaxTimeoutMinusOne) {
@@ -48,6 +51,8 @@
     }
     timeout -= kMaxTimeoutMinusOne;
   }
+  // On a tick based kernel we cannot tell how far along we are on the current
+  // tick, ergo we add one whole tick to the final duration.
   const UINT result =
       tx_mutex_get(&native_type_, static_cast<ULONG>(timeout.count() + 1));
   if (result == TX_NOT_AVAILABLE) {
diff --git a/pw_sync_threadx/timed_thread_notification.cc b/pw_sync_threadx/timed_thread_notification.cc
index fbf5ba4..882147b 100644
--- a/pw_sync_threadx/timed_thread_notification.cc
+++ b/pw_sync_threadx/timed_thread_notification.cc
@@ -30,8 +30,12 @@
 }  // namespace
 
 bool TimedThreadNotification::try_acquire_for(SystemClock::duration timeout) {
+  // Enforce the pw::sync::TimedThreadNotification IRQ contract.
   PW_DCHECK(!interrupt::InInterruptContext());
+
+  // Enforce that only a single thread can block at a time.
   PW_DCHECK(native_handle().blocked_thread == nullptr);
+
   {
     std::lock_guard lock(backend::thread_notification_isl);
     const bool notified = native_handle().notified;
@@ -45,8 +49,12 @@
   }
 
   const bool notified = [&]() {
-    // On a tick based kernel we cannot tell how far along we are on the current
-    // tick, ergo we add one whole tick to the final duration.
+    // In case the timeout is too long for us to express through the native
+    // ThreadX API, we repeatedly wait with shorter durations. Note that on a
+    // tick based kernel we cannot tell how far along we are on the current
+    // tick, ergo we add one whole tick to the final duration. However, this
+    // also means that the loop must ensure that timeout + 1 does not exceed
+    // the max timeout.
     constexpr SystemClock::duration kMaxTimeoutMinusOne =
         pw::chrono::threadx::kMaxTimeout - SystemClock::duration(1);
     // In case the timeout is too long for us to express through the native
@@ -61,6 +69,8 @@
       timeout -= kMaxTimeoutMinusOne;
     }
 
+    // On a tick based kernel we cannot tell how far along we are on the current
+    // tick, ergo we add one whole tick to the final duration.
     const UINT result =
         tx_thread_sleep(static_cast<ULONG>(timeout.count() + 1));
     if (result == TX_SUCCESS) {