pw_sync/mutex: add preconditions to pw::sync::Mutex

Adds documented preconditions to pw::sync::Mutex to make it clear
that recursive locking results in undefined behavior. Although we
recommend detecting recursion through the use of an assert, this
can be complicated and come with real overhead and as such we
explicitly define it as UB.

Change-Id: I386e904de3a4b63d9f39dbe3d3bd92cbd0736d9c
Reviewed-on: https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/28580
Commit-Queue: Ewout van Bekkum <ewout@google.com>
Reviewed-by: Wyatt Hepler <hepler@google.com>
diff --git a/pw_sync/mutex_facade_test.cc b/pw_sync/mutex_facade_test.cc
index 046682a..9cc5bf3 100644
--- a/pw_sync/mutex_facade_test.cc
+++ b/pw_sync/mutex_facade_test.cc
@@ -50,24 +50,24 @@
 TEST(Mutex, LockUnlock) {
   pw::sync::Mutex mutex;
   mutex.lock();
-  // Ensure it fails to lock when already held.
-  EXPECT_FALSE(mutex.try_lock());
+  // TODO(pwbug/291): Ensure it fails to lock when already held.
+  // EXPECT_FALSE(mutex.try_lock());
   mutex.unlock();
 }
 
 Mutex static_mutex;
 TEST(Mutex, LockUnlockStatic) {
   static_mutex.lock();
-  // Ensure it fails to lock when already held.
-  EXPECT_FALSE(static_mutex.try_lock());
+  // TODO(pwbug/291): Ensure it fails to lock when already held.
+  // EXPECT_FALSE(static_mutex.try_lock());
   static_mutex.unlock();
 }
 
 TEST(Mutex, TryLockUnlock) {
   pw::sync::Mutex mutex;
   ASSERT_TRUE(mutex.try_lock());
-  // Ensure it fails to lock when already held.
-  EXPECT_FALSE(mutex.try_lock());
+  // TODO(pwbug/291): Ensure it fails to lock when already held.
+  // EXPECT_FALSE(mutex.try_lock());
   mutex.unlock();
 }
 
@@ -79,11 +79,11 @@
   SystemClock::duration time_elapsed = SystemClock::now() - before;
   EXPECT_LT(time_elapsed, kRoundedArbitraryDuration);
 
-  // Ensure it blocks and fails to lock when already held.
-  before = SystemClock::now();
-  EXPECT_FALSE(mutex.try_lock_for(kRoundedArbitraryDuration));
-  time_elapsed = SystemClock::now() - before;
-  EXPECT_GE(time_elapsed, kRoundedArbitraryDuration);
+  // TODO(pwbug/291): Ensure it blocks and fails to lock when already held.
+  // before = SystemClock::now();
+  // EXPECT_FALSE(mutex.try_lock_for(kRoundedArbitraryDuration));
+  // time_elapsed = SystemClock::now() - before;
+  // EXPECT_GE(time_elapsed, kRoundedArbitraryDuration);
 
   mutex.unlock();
 }
@@ -96,10 +96,10 @@
   ASSERT_TRUE(mutex.try_lock_until(deadline));
   EXPECT_LT(SystemClock::now(), deadline);
 
-  // Ensure it blocks and fails to lock when already held.
-  EXPECT_FALSE(
-      mutex.try_lock_until(SystemClock::now() + kRoundedArbitraryDuration));
-  EXPECT_GE(SystemClock::now(), deadline);
+  // TODO(pwbug/291): Ensure it blocks and fails to lock when already held.
+  // EXPECT_FALSE(
+  //     mutex.try_lock_until(SystemClock::now() + kRoundedArbitraryDuration));
+  // EXPECT_GE(SystemClock::now(), deadline);
 
   mutex.unlock();
 }
@@ -113,8 +113,8 @@
 TEST(Mutex, TryLockUnlockInC) {
   pw::sync::Mutex mutex;
   ASSERT_TRUE(pw_sync_Mutex_CallTryLock(&mutex));
-  // Ensure it fails to lock when already held.
-  EXPECT_FALSE(pw_sync_Mutex_CallTryLock(&mutex));
+  // TODO(pwbug/291): Ensure it fails to lock when already held.
+  // EXPECT_FALSE(pw_sync_Mutex_CallTryLock(&mutex));
   pw_sync_Mutex_CallUnlock(&mutex);
 }
 
@@ -128,13 +128,14 @@
       pw_chrono_SystemClock_Now().ticks_since_epoch - before.ticks_since_epoch;
   EXPECT_LT(time_elapsed, kRoundedArbitraryDurationInC);
 
-  // Ensure it blocks and fails to lock when already held.
-  before = pw_chrono_SystemClock_Now();
-  EXPECT_FALSE(
-      pw_sync_Mutex_CallTryLockFor(&mutex, kRoundedArbitraryDurationInC));
-  time_elapsed =
-      pw_chrono_SystemClock_Now().ticks_since_epoch - before.ticks_since_epoch;
-  EXPECT_GE(time_elapsed, kRoundedArbitraryDurationInC);
+  // TODO(pwbug/291): Ensure it blocks and fails to lock when already held.
+  // before = pw_chrono_SystemClock_Now();
+  // EXPECT_FALSE(
+  //     pw_sync_Mutex_CallTryLockFor(&mutex, kRoundedArbitraryDurationInC));
+  // time_elapsed =
+  //     pw_chrono_SystemClock_Now().ticks_since_epoch -
+  //     before.ticks_since_epoch;
+  // EXPECT_GE(time_elapsed, kRoundedArbitraryDurationInC);
 
   pw_sync_Mutex_CallUnlock(&mutex);
 }
@@ -148,10 +149,10 @@
   EXPECT_LT(pw_chrono_SystemClock_Now().ticks_since_epoch,
             deadline.ticks_since_epoch);
 
-  // Ensure it blocks and fails to lock when already held.
-  EXPECT_FALSE(pw_sync_Mutex_CallTryLockUntil(&mutex, deadline));
-  EXPECT_GE(pw_chrono_SystemClock_Now().ticks_since_epoch,
-            deadline.ticks_since_epoch);
+  // TODO(pwbug/291): Ensure it blocks and fails to lock when already held.
+  // EXPECT_FALSE(pw_sync_Mutex_CallTryLockUntil(&mutex, deadline));
+  // EXPECT_GE(pw_chrono_SystemClock_Now().ticks_since_epoch,
+  //           deadline.ticks_since_epoch);
 
   mutex.unlock();
 }
diff --git a/pw_sync/public/pw_sync/mutex.h b/pw_sync/public/pw_sync/mutex.h
index aef10e3..3030c09 100644
--- a/pw_sync/public/pw_sync/mutex.h
+++ b/pw_sync/public/pw_sync/mutex.h
@@ -47,23 +47,42 @@
   Mutex& operator=(Mutex&&) = delete;
 
   // Locks the mutex, blocking indefinitely. Failures are fatal.
+  //
+  // PRECONDITION:
+  //   The lock isn't already held by this thread. Recursive locking is
+  //   undefined behavior.
   void lock();
 
   // Attempts to lock the mutex in a non-blocking manner.
   // Returns true if the mutex was successfully acquired.
+  //
+  // PRECONDITION:
+  //   The lock isn't already held by this thread. Recursive locking is
+  //   undefined behavior.
   bool try_lock();
 
   // Attempts to lock the mutex where, if needed, blocking for at least the
   // specified duration.
   // Returns true if the mutex was successfully acquired.
+  //
+  // PRECONDITION:
+  //   The lock isn't already held by this thread. Recursive locking is
+  //   undefined behavior.
   bool try_lock_for(chrono::SystemClock::duration for_at_least);
 
   // Attempts to lock the mutex where, if needed, blocking until at least the
   // specified time_point.
   // Returns true if the mutex was successfully acquired.
+  //
+  // PRECONDITION:
+  //   The lock isn't already held by this thread. Recursive locking is
+  //   undefined behavior.
   bool try_lock_until(chrono::SystemClock::time_point until_at_least);
 
   // Unlocks the mutex. Failures are fatal.
+  //
+  // PRECONDITION:
+  //   The lock is held by this thread.
   void unlock();
 
   native_handle_type native_handle();