pw_sync/mutex: Consistently debug assert against recursive locks

Updates the embOS and ThreadX backends to add debug asserts that
detect accidental recursive mutex use.
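
For illustration, a minimal sketch of the misuse the new asserts
catch (the mutex and function names are hypothetical; only the
public pw::sync::Mutex API is assumed):

  #include "pw_sync/mutex.h"

  pw::sync::Mutex mutex;

  void BadNestedLock() {
    mutex.lock();
    // embOS and ThreadX mutexes are natively recursive, so a second
    // lock() by the holder used to silently succeed; in debug builds
    // it now fails a PW_DASSERT/PW_DCHECK against the backend's lock
    // or ownership count.
    mutex.lock();
    mutex.unlock();
    mutex.unlock();
  }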

Note that this cannot be done on FreeRTOS; however, recursive
locking already fails there due to the native implementation.
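
For comparison, a sketch of the FreeRTOS behavior noted above,
assuming the same hypothetical mutex (that backend wraps a
non-recursive native mutex):

  mutex.lock();
  // The native mutex refuses a second acquisition by the holder:
  // try_lock() returns false, and lock() would block indefinitely.
  bool reacquired = mutex.try_lock();  // == false
  mutex.unlock();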

Change-Id: Iead1371fc2d419d8a9939819c2895801d32ca90e
Reviewed-on: https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/67382
Commit-Queue: Ewout van Bekkum <ewout@google.com>
Pigweed-Auto-Submit: Ewout van Bekkum <ewout@google.com>
Reviewed-by: Keir Mierle <keir@google.com>
diff --git a/pw_sync_embos/public/pw_sync_embos/mutex_inline.h b/pw_sync_embos/public/pw_sync_embos/mutex_inline.h
index 68c94bc..78ad100 100644
--- a/pw_sync_embos/public/pw_sync_embos/mutex_inline.h
+++ b/pw_sync_embos/public/pw_sync_embos/mutex_inline.h
@@ -28,13 +28,19 @@
   // Enforce the pw::sync::Mutex IRQ contract.
   PW_DASSERT(!interrupt::InInterruptContext());
   const int lock_count = OS_Use(&native_type_);
-  PW_ASSERT(lock_count == 1);  // Recursive locking is not permitted.
+  PW_DASSERT(lock_count == 1);  // Recursive locking is not permitted.
 }
 
 inline bool Mutex::try_lock() {
   // Enforce the pw::sync::Mutex IRQ contract.
   PW_DASSERT(!interrupt::InInterruptContext());
-  return OS_Request(&native_type_) != 0;
+  if (OS_Request(&native_type_) == 0) {
+    return false;
+  }
+
+  // Recursive locking is not permitted.
+  PW_DASSERT(OS_GetSemaValue(&native_type_) == 1);
+  return true;
 }
 
 inline void Mutex::unlock() {
diff --git a/pw_sync_embos/timed_mutex.cc b/pw_sync_embos/timed_mutex.cc
index 92cac1b..5bacc10 100644
--- a/pw_sync_embos/timed_mutex.cc
+++ b/pw_sync_embos/timed_mutex.cc
@@ -46,7 +46,7 @@
     const int lock_count = OS_UseTimed(
         &native_handle(), static_cast<OS_TIME>(kMaxTimeoutMinusOne.count()));
     if (lock_count != 0) {
-      PW_CHECK_UINT_EQ(1, lock_count, "Recursive locking is not permitted");
+      PW_DCHECK_UINT_EQ(1, lock_count, "Recursive locking is not permitted");
       return true;
     }
     timeout -= kMaxTimeoutMinusOne;
@@ -55,7 +55,7 @@
   // tick, ergo we add one whole tick to the final duration.
   const int lock_count =
       OS_UseTimed(&native_handle(), static_cast<OS_TIME>(timeout.count() + 1));
-  PW_CHECK_UINT_LE(lock_count, 1, "Recursive locking is not permitted");
+  PW_DCHECK_UINT_LE(lock_count, 1, "Recursive locking is not permitted");
   return lock_count == 1;
 }
 
diff --git a/pw_sync_threadx/public/pw_sync_threadx/mutex_inline.h b/pw_sync_threadx/public/pw_sync_threadx/mutex_inline.h
index f983c0c..680c8ee 100644
--- a/pw_sync_threadx/public/pw_sync_threadx/mutex_inline.h
+++ b/pw_sync_threadx/public/pw_sync_threadx/mutex_inline.h
@@ -23,6 +23,14 @@
 
 inline constexpr char kMutexName[] = "pw::Mutex";
 
+// Precondition: The mutex must be held when calling this!
+inline bool NotRecursivelyHeld(TX_MUTEX& mutex) {
+  // Don't use tx_mutex_info_get which goes into a critical section to read
+  // this count. Given that this is only called while the mutex is held, nothing
+  // can mutate this count and ergo we access the control block directly.
+  return mutex.tx_mutex_ownership_count == 1;
+}
+
 }  // namespace backend
 
 inline Mutex::Mutex() : native_type_() {
@@ -39,6 +47,9 @@
   // Enforce the pw::sync::Mutex IRQ contract.
   PW_DASSERT(!interrupt::InInterruptContext());
   PW_ASSERT(tx_mutex_get(&native_type_, TX_WAIT_FOREVER) == TX_SUCCESS);
+
+  // Recursive locking is not supported.
+  PW_DASSERT(backend::NotRecursivelyHeld(native_type_));
 }
 
 inline bool Mutex::try_lock() {
@@ -49,6 +60,8 @@
     return false;
   }
   PW_ASSERT(result == TX_SUCCESS);
+  // Recursive locking is not supported.
+  PW_DASSERT(backend::NotRecursivelyHeld(native_type_));
   return true;
 }
 
diff --git a/pw_sync_threadx/timed_mutex.cc b/pw_sync_threadx/timed_mutex.cc
index ead0f1b..785f228 100644
--- a/pw_sync_threadx/timed_mutex.cc
+++ b/pw_sync_threadx/timed_mutex.cc
@@ -44,7 +44,7 @@
       pw::chrono::threadx::kMaxTimeout - SystemClock::duration(1);
   while (timeout > kMaxTimeoutMinusOne) {
     const UINT result = tx_mutex_get(
-        &native_type_, static_cast<ULONG>(kMaxTimeoutMinusOne.count()));
+        &native_handle(), static_cast<ULONG>(kMaxTimeoutMinusOne.count()));
     if (result != TX_NOT_AVAILABLE) {
       PW_CHECK_UINT_EQ(TX_SUCCESS, result);
       return true;
@@ -54,11 +54,14 @@
   // On a tick based kernel we cannot tell how far along we are on the current
   // tick, ergo we add one whole tick to the final duration.
   const UINT result =
-      tx_mutex_get(&native_type_, static_cast<ULONG>(timeout.count() + 1));
+      tx_mutex_get(&native_handle(), static_cast<ULONG>(timeout.count() + 1));
   if (result == TX_NOT_AVAILABLE) {
     return false;
   }
   PW_CHECK_UINT_EQ(TX_SUCCESS, result);
+
+  PW_DCHECK(backend::NotRecursivelyHeld(native_handle()),
+            "Recursive locking is not supported");
   return true;
 }