Revert "Replace pthread_mutex with std::mutex"

This reverts commit 4b801825bd90b826fb53d51f497aba87d6d00d8c.
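
The locking pattern reinstated throughout these files is the plain
pthread one: a pthread_mutex_t (made recursive where re-entrant locking
is needed) that is explicitly initialized, locked and unlocked on every
path, and destroyed, instead of std::mutex / std::recursive_mutex with
RAII guards. A minimal sketch of that pattern, for reference only (the
names below are illustrative and not from the tree):

    #include <pthread.h>

    static pthread_mutex_t example_lock;

    // Recursive, so code already holding the lock may lock it again
    // (the same setup a2dp_stream_common_init() and the alarm
    // constructor in osi/src/alarm.cc use below).
    static void example_init(void) {
      pthread_mutexattr_t attr;
      pthread_mutexattr_init(&attr);
      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
      pthread_mutex_init(&example_lock, &attr);
      pthread_mutexattr_destroy(&attr);
    }

    static void example_use(void) {
      pthread_mutex_lock(&example_lock);
      // ... critical section: every early return must unlock first ...
      pthread_mutex_unlock(&example_lock);
    }

    static void example_cleanup(void) {
      pthread_mutex_destroy(&example_lock);
    }

Unlike the RAII guards being reverted, unlocking is manual, so each
early return inside a locked region (see the hci_layer.cc and alarm.cc
hunks below) releases the lock explicitly.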

Change-Id: I862272c682409778dce356c8cba497ea4b78b14d
diff --git a/audio_a2dp_hw/audio_a2dp_hw.cc b/audio_a2dp_hw/audio_a2dp_hw.cc
index e880894..b00a021 100644
--- a/audio_a2dp_hw/audio_a2dp_hw.cc
+++ b/audio_a2dp_hw/audio_a2dp_hw.cc
@@ -29,7 +29,7 @@
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
-#include <mutex>
+#include <pthread.h>
 #include <stdint.h>
 #include <sys/errno.h>
 #include <sys/socket.h>
@@ -100,7 +100,7 @@
 /* move ctrl_fd outside output stream and keep open until HAL unloaded ? */
 
 struct a2dp_stream_common {
-    std::recursive_mutex    *mutex;
+    pthread_mutex_t         lock;
     int                     ctrl_fd;
     int                     audio_fd;
     size_t                  buffer_sz;
@@ -454,9 +454,13 @@
 
 static void a2dp_stream_common_init(struct a2dp_stream_common *common)
 {
+    pthread_mutexattr_t lock_attr;
+
     FNLOG();
 
-    common->mutex = new std::recursive_mutex;
+    pthread_mutexattr_init(&lock_attr);
+    pthread_mutexattr_settype(&lock_attr, PTHREAD_MUTEX_RECURSIVE);
+    pthread_mutex_init(&common->lock, &lock_attr);
 
     common->ctrl_fd = AUDIO_SKT_DISCONNECTED;
     common->audio_fd = AUDIO_SKT_DISCONNECTED;
@@ -466,14 +470,6 @@
     common->buffer_sz = AUDIO_STREAM_OUTPUT_BUFFER_SZ;
 }
 
-static void a2dp_stream_common_destroy(struct a2dp_stream_common *common)
-{
-    FNLOG();
-
-    delete common->mutex;
-    common->mutex = NULL;
-}
-
 static int start_audio_datapath(struct a2dp_stream_common *common)
 {
     INFO("state %d", common->state);
@@ -575,7 +571,7 @@
 
     DEBUG("write %zu bytes (fd %d)", bytes, out->common.audio_fd);
 
-    std::unique_lock<std::recursive_mutex> lock(*out->common.mutex);
+    pthread_mutex_lock(&out->common.lock);
     if (out->common.state == AUDIO_A2DP_STATE_SUSPENDED ||
             out->common.state == AUDIO_A2DP_STATE_STOPPING) {
         DEBUG("stream suspended or closing");
@@ -597,9 +593,9 @@
         goto finish;
     }
 
-    lock.unlock();
+    pthread_mutex_unlock(&out->common.lock);
     sent = skt_write(out->common.audio_fd, buffer,  bytes);
-    lock.lock();
+    pthread_mutex_lock(&out->common.lock);
 
     if (sent == -1) {
         skt_disconnect(out->common.audio_fd);
@@ -617,7 +613,7 @@
     const size_t frames = bytes / audio_stream_out_frame_size(stream);
     out->frames_rendered += frames;
     out->frames_presented += frames;
-    lock.unlock();
+    pthread_mutex_unlock(&out->common.lock);
 
     // If send didn't work out, sleep to emulate write delay.
     if (sent == -1) {
@@ -699,11 +695,12 @@
 
     FNLOG();
 
-    std::lock_guard<std::recursive_mutex> lock(*out->common.mutex);
+    pthread_mutex_lock(&out->common.lock);
     // Do nothing in SUSPENDED state.
     if (out->common.state != AUDIO_A2DP_STATE_SUSPENDED)
         retVal = suspend_audio_datapath(&out->common, true);
     out->frames_rendered = 0; // rendered is reset, presented is not
+    pthread_mutex_unlock(&out->common.lock);
 
     return retVal;
 }
@@ -728,7 +725,7 @@
     if (params.empty())
       return status;
 
-    std::lock_guard<std::recursive_mutex> lock(*out->common.mutex);
+    pthread_mutex_lock(&out->common.lock);
 
     /* dump params */
     hash_map_utils_dump_string_keys_string_values(params);
@@ -754,6 +751,8 @@
         /* Irrespective of the state, return 0 */
     }
 
+    pthread_mutex_unlock(&out->common.lock);
+
     return status;
 }
 
@@ -804,13 +803,14 @@
         return -EINVAL;
 
     int ret = -EWOULDBLOCK;
-    std::lock_guard<std::recursive_mutex> lock(*out->common.mutex);
+    pthread_mutex_lock(&out->common.lock);
     uint64_t latency_frames = (uint64_t)out_get_latency(stream) * out->common.cfg.rate / 1000;
     if (out->frames_presented >= latency_frames) {
         *frames = out->frames_presented - latency_frames;
         clock_gettime(CLOCK_MONOTONIC, timestamp); // could also be associated with out_write().
         ret = 0;
     }
+    pthread_mutex_unlock(&out->common.lock);
     return ret;
 }
 
@@ -823,13 +823,14 @@
     if (stream == NULL || dsp_frames == NULL)
         return -EINVAL;
 
-    std::lock_guard<std::recursive_mutex> lock(*out->common.mutex);
+    pthread_mutex_lock(&out->common.lock);
     uint64_t latency_frames = (uint64_t)out_get_latency(stream) * out->common.cfg.rate / 1000;
     if (out->frames_rendered >= latency_frames) {
         *dsp_frames = (uint32_t)(out->frames_rendered - latency_frames);
     } else {
         *dsp_frames = 0;
     }
+    pthread_mutex_unlock(&out->common.lock);
     return 0;
 }
 
@@ -950,7 +951,7 @@
 
     DEBUG("read %zu bytes, state: %d", bytes, in->common.state);
 
-    std::unique_lock<std::recursive_mutex> lock(*in->common.mutex);
+    pthread_mutex_lock(&in->common.lock);
     if (in->common.state == AUDIO_A2DP_STATE_SUSPENDED ||
             in->common.state == AUDIO_A2DP_STATE_STOPPING)
     {
@@ -973,9 +974,9 @@
         goto error;
     }
 
-    lock.unlock();
+    pthread_mutex_unlock(&in->common.lock);
     read = skt_read(in->common.audio_fd, buffer, bytes);
-    lock.lock();
+    pthread_mutex_lock(&in->common.lock);
     if (read == -1)
     {
         skt_disconnect(in->common.audio_fd);
@@ -992,13 +993,13 @@
         memset(buffer, 0, bytes);
         read = bytes;
     }
-    lock.unlock();
+    pthread_mutex_unlock(&in->common.lock);
 
     DEBUG("read %d bytes out of %zu bytes", read, bytes);
     return read;
 
 error:
-    lock.unlock();
+    pthread_mutex_unlock(&in->common.lock);
     memset(buffer, 0, bytes);
     us_delay = calc_audiotime(in->common.cfg, bytes);
     DEBUG("emulate a2dp read delay (%d us)", us_delay);
@@ -1100,7 +1101,6 @@
     return 0;
 
 err_open:
-    a2dp_stream_common_destroy(&out->common);
     free(out);
     *stream_out = NULL;
     a2dp_dev->output = NULL;
@@ -1116,7 +1116,7 @@
 
     INFO("closing output (state %d)", out->common.state);
 
-    std::unique_lock<std::recursive_mutex> lock(*out->common.mutex);
+    pthread_mutex_lock(&out->common.lock);
     if ((out->common.state == AUDIO_A2DP_STATE_STARTED) ||
             (out->common.state == AUDIO_A2DP_STATE_STOPPING)) {
         stop_audio_datapath(&out->common);
@@ -1124,10 +1124,9 @@
 
     skt_disconnect(out->common.ctrl_fd);
     out->common.ctrl_fd = AUDIO_SKT_DISCONNECTED;
-    lock.unlock();
-    a2dp_stream_common_destroy(&out->common);
+    pthread_mutex_unlock(&out->common.lock);
     free(stream);
     a2dp_dev->output = NULL;
 
     DEBUG("done");
 }
@@ -1276,7 +1275,6 @@
     return 0;
 
 err_open:
-    a2dp_stream_common_destroy(&in->common);
     free(in);
     *stream_in = NULL;
     a2dp_dev->input = NULL;
@@ -1298,7 +1296,6 @@
 
     skt_disconnect(in->common.ctrl_fd);
     in->common.ctrl_fd = AUDIO_SKT_DISCONNECTED;
-    a2dp_stream_common_destroy(&in->common);
     free(stream);
     a2dp_dev->input = NULL;
 
diff --git a/btcore/src/module.cc b/btcore/src/module.cc
index 453656a..6a78a99 100644
--- a/btcore/src/module.cc
+++ b/btcore/src/module.cc
@@ -20,9 +20,8 @@
 
 #include <assert.h>
 #include <dlfcn.h>
+#include <pthread.h>
 #include <string.h>
-
-#include <mutex>
 #include <unordered_map>
 
 #include "btcore/include/module.h"
@@ -38,8 +37,8 @@
 
 static std::unordered_map<const module_t*, module_state_t> metadata;
 
-// TODO(jamuraa): remove this lock after the startup sequence is clean
-static std::mutex metadata_mutex;
+// Include this lock for now for correctness, while the startup sequence is being refactored
+static pthread_mutex_t metadata_lock;
 
 static bool call_lifecycle_function(module_lifecycle_fn function);
 static module_state_t get_module_state(const module_t *module);
@@ -47,10 +46,13 @@
 
 
 void module_management_start(void) {
+  pthread_mutex_init(&metadata_lock, NULL);
 }
 
 void module_management_stop(void) {
   metadata.clear();
+
+  pthread_mutex_destroy(&metadata_lock);
 }
 
 const module_t *get_module(const char *name) {
@@ -150,15 +152,17 @@
 }
 
 static module_state_t get_module_state(const module_t *module) {
-  std::lock_guard<std::mutex> lock(metadata_mutex);
+  pthread_mutex_lock(&metadata_lock);
   auto map_ptr = metadata.find(module);
+  pthread_mutex_unlock(&metadata_lock);
 
   return (map_ptr != metadata.end()) ? map_ptr->second : MODULE_STATE_NONE;
 }
 
 static void set_module_state(const module_t *module, module_state_t state) {
-  std::lock_guard<std::mutex> lock(metadata_mutex);
+  pthread_mutex_lock(&metadata_lock);
   metadata[module] = state;
+  pthread_mutex_unlock(&metadata_lock);
 }
 
 // TODO(zachoverflow): remove when everything modulized
diff --git a/btcore/src/osi_module.cc b/btcore/src/osi_module.cc
index 011838b..ff71446 100644
--- a/btcore/src/osi_module.cc
+++ b/btcore/src/osi_module.cc
@@ -23,16 +23,19 @@
 #include "osi/include/alarm.h"
 #include "osi/include/future.h"
 #include "osi/include/log.h"
+#include "osi/include/mutex.h"
 #include "osi/include/osi.h"
 #include "osi/include/wakelock.h"
 
 future_t *osi_init(void) {
+  mutex_init();
   return future_new_immediate(FUTURE_SUCCESS);
 }
 
 future_t *osi_clean_up(void) {
   alarm_cleanup();
   wakelock_cleanup();
+  mutex_cleanup();
   return future_new_immediate(FUTURE_SUCCESS);
 }
 
diff --git a/hci/src/btsnoop_net.cc b/hci/src/btsnoop_net.cc
index deb703c..bce7885 100644
--- a/hci/src/btsnoop_net.cc
+++ b/hci/src/btsnoop_net.cc
@@ -30,8 +30,6 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <mutex>
-
 #include "osi/include/log.h"
 #include "osi/include/osi.h"
 
@@ -44,7 +42,7 @@
 
 static pthread_t listen_thread_;
 static bool listen_thread_valid_ = false;
-static std::mutex client_socket_mutex_;
+static pthread_mutex_t client_socket_lock_ = PTHREAD_MUTEX_INITIALIZER;
 static int listen_socket_ = -1;
 static int client_socket_ = -1;
 
@@ -81,7 +79,7 @@
   return;  // Disable using network sockets for security reasons
 #endif
 
-  std::lock_guard<std::mutex> lock(client_socket_mutex_);
+  pthread_mutex_lock(&client_socket_lock_);
   if (client_socket_ != -1) {
     ssize_t ret;
     OSI_NO_INTR(ret = send(client_socket_, data, length, 0));
@@ -90,6 +88,7 @@
       safe_close_(&client_socket_);
     }
   }
+  pthread_mutex_unlock(&client_socket_lock_);
 }
 
 static void* listen_fn_(UNUSED_ATTR void* context) {
@@ -140,11 +139,12 @@
 
     /* When a new client connects, we have to send the btsnoop file header. This
      * allows a decoder to treat the session as a new, valid btsnoop file. */
-    std::lock_guard<std::mutex> lock(client_socket_mutex_);
+    pthread_mutex_lock(&client_socket_lock_);
     safe_close_(&client_socket_);
     client_socket_ = client_socket;
 
     OSI_NO_INTR(send(client_socket_, "btsnoop\0\0\0\0\1\0\0\x3\xea", 16, 0));
+    pthread_mutex_unlock(&client_socket_lock_);
   }
 
 cleanup:
diff --git a/hci/src/hci_layer.cc b/hci/src/hci_layer.cc
index b8ac022..5a87bab 100644
--- a/hci/src/hci_layer.cc
+++ b/hci/src/hci_layer.cc
@@ -21,13 +21,12 @@
 #include "hci_layer.h"
 
 #include <assert.h>
+#include <pthread.h>
 #include <signal.h>
 #include <string.h>
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <mutex>
-
 #include "btcore/include/module.h"
 #include "btsnoop.h"
 #include "buffer_allocator.h"
@@ -153,7 +152,7 @@
 // Inbound-related
 static alarm_t* command_response_timer;
 static list_t* commands_pending_response;
-static std::mutex commands_pending_response_mutex;
+static pthread_mutex_t commands_pending_response_lock;
 static packet_receive_data_t incoming_packets[INBOUND_PACKET_TYPE_COUNT];
 
 // The hand-off point for data going to a higher layer, set by the higher layer
@@ -174,6 +173,8 @@
   command_credits = 1;
   firmware_is_configured = false;
 
+  pthread_mutex_init(&commands_pending_response_lock, NULL);
+
   // For now, always use the default timeout on non-Android builds.
   period_ms_t startup_timeout_ms = DEFAULT_STARTUP_TIMEOUT_MS;
 
@@ -302,6 +303,8 @@
   list_free(commands_pending_response);
   commands_pending_response = NULL;
 
+  pthread_mutex_destroy(&commands_pending_response_lock);
+
   packet_fragmenter->cleanup();
 
   // Free the timers
@@ -407,23 +410,27 @@
 
   alarm_cancel(startup_timer);
 
-  std::lock_guard<std::mutex> lock(commands_pending_response_mutex);
+  pthread_mutex_lock(&commands_pending_response_lock);
 
   if (startup_future == NULL) {
     // The firmware configuration took too long - ignore the callback
+    pthread_mutex_unlock(&commands_pending_response_lock);
     return;
   }
   firmware_is_configured = success;
   future_ready(startup_future, success ? FUTURE_SUCCESS : FUTURE_FAIL);
   startup_future = NULL;
+
+  pthread_mutex_unlock(&commands_pending_response_lock);
 }
 
 static void startup_timer_expired(UNUSED_ATTR void* context) {
   LOG_ERROR(LOG_TAG, "%s", __func__);
 
-  std::lock_guard<std::mutex> lock(commands_pending_response_mutex);
+  pthread_mutex_lock(&commands_pending_response_lock);
   future_ready(startup_future, FUTURE_FAIL);
   startup_future = NULL;
+  pthread_mutex_unlock(&commands_pending_response_lock);
 }
 
 // Postload functions
@@ -468,10 +475,9 @@
     command_credits--;
 
     // Move it to the list of commands awaiting response
-    {
-      std::lock_guard<std::mutex> lock(commands_pending_response_mutex);
-      list_append(commands_pending_response, wait_entry);
-    }
+    pthread_mutex_lock(&commands_pending_response_lock);
+    list_append(commands_pending_response, wait_entry);
+    pthread_mutex_unlock(&commands_pending_response_lock);
 
     // Send it off
     low_power_manager->wake_assert();
@@ -518,14 +524,14 @@
 }
 
 static void command_timed_out(UNUSED_ATTR void* context) {
-  std::unique_lock<std::mutex> lock(commands_pending_response_mutex);
+  pthread_mutex_lock(&commands_pending_response_lock);
 
   if (list_is_empty(commands_pending_response)) {
     LOG_ERROR(LOG_TAG, "%s with no commands pending response", __func__);
   } else {
     waiting_command_t* wait_entry =
         static_cast<waiting_command_t*>(list_front(commands_pending_response));
-    lock.unlock();
+    pthread_mutex_unlock(&commands_pending_response_lock);
 
     // We shouldn't try to recover the stack from this command timeout.
     // If it's caused by a software bug, fix it. If it's a hardware bug, fix it.
@@ -773,7 +779,7 @@
 }
 
 static waiting_command_t* get_waiting_command(command_opcode_t opcode) {
-  std::lock_guard<std::mutex> lock(commands_pending_response_mutex);
+  pthread_mutex_lock(&commands_pending_response_lock);
 
   for (const list_node_t* node = list_begin(commands_pending_response);
        node != list_end(commands_pending_response); node = list_next(node)) {
@@ -784,9 +790,11 @@
 
     list_remove(commands_pending_response, wait_entry);
 
+    pthread_mutex_unlock(&commands_pending_response_lock);
     return wait_entry;
   }
 
+  pthread_mutex_unlock(&commands_pending_response_lock);
   return NULL;
 }
 
diff --git a/osi/include/mutex.h b/osi/include/mutex.h
index 81a98d2..3d1b306 100644
--- a/osi/include/mutex.h
+++ b/osi/include/mutex.h
@@ -24,6 +24,12 @@
 extern "C" {
 #endif
 
+// Mutex-related state init
+void mutex_init(void);
+
+// Mutex-related state cleanup
+void mutex_cleanup(void);
+
 // Lock the global mutex
 void mutex_global_lock(void);
 
diff --git a/osi/src/alarm.cc b/osi/src/alarm.cc
index 08bf9f8..dfe7a85 100644
--- a/osi/src/alarm.cc
+++ b/osi/src/alarm.cc
@@ -27,14 +27,13 @@
 #include <fcntl.h>
 #include <inttypes.h>
 #include <malloc.h>
+#include <pthread.h>
 #include <signal.h>
 #include <string.h>
 #include <time.h>
 
 #include <hardware/bluetooth.h>
 
-#include <mutex>
-
 #include "osi/include/allocator.h"
 #include "osi/include/fixed_queue.h"
 #include "osi/include/list.h"
@@ -71,12 +70,12 @@
 } alarm_stats_t;
 
 struct alarm_t {
-  // The mutex is held while the callback for this alarm is being executed.
+  // The lock is held while the callback for this alarm is being executed.
   // It allows us to release the coarse-grained monitor lock while a
   // potentially long-running callback is executing. |alarm_cancel| uses this
-  // mutex to provide a guarantee to its caller that the callback will not be
+  // lock to provide a guarantee to its caller that the callback will not be
   // in progress when it returns.
-  std::recursive_mutex *callback_mutex;
+  pthread_mutex_t callback_lock;
   period_ms_t creation_time;
   period_ms_t period;
   period_ms_t deadline;
@@ -105,7 +104,7 @@
 // This mutex ensures that the |alarm_set|, |alarm_cancel|, and alarm callback
 // functions execute serially and not concurrently. As a result, this mutex
 // also protects the |alarms| list.
-static std::mutex alarms_mutex;
+static pthread_mutex_t monitor;
 static list_t* alarms;
 static timer_t timer;
 static timer_t wakeup_timer;
@@ -157,21 +156,45 @@
     return NULL;
   }
 
+  pthread_mutexattr_t attr;
+  pthread_mutexattr_init(&attr);
+
   alarm_t* ret = static_cast<alarm_t*>(osi_calloc(sizeof(alarm_t)));
 
-  ret->callback_mutex = new std::recursive_mutex;
+  // Make this a recursive mutex to make it safe to call |alarm_cancel| from
+  // within the callback function of the alarm.
+  int error = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+  if (error) {
+    LOG_ERROR(LOG_TAG, "%s unable to create a recursive mutex: %s", __func__,
+              strerror(error));
+    goto error;
+  }
+
+  error = pthread_mutex_init(&ret->callback_lock, &attr);
+  if (error) {
+    LOG_ERROR(LOG_TAG, "%s unable to initialize mutex: %s", __func__,
+              strerror(error));
+    goto error;
+  }
+
   ret->is_periodic = is_periodic;
   ret->stats.name = osi_strdup(name);
   // NOTE: The stats were reset by osi_calloc() above
 
+  pthread_mutexattr_destroy(&attr);
   return ret;
+
+error:
+  pthread_mutexattr_destroy(&attr);
+  osi_free(ret);
+  return NULL;
 }
 
 void alarm_free(alarm_t* alarm) {
   if (!alarm) return;
 
   alarm_cancel(alarm);
-  delete alarm->callback_mutex;
+  pthread_mutex_destroy(&alarm->callback_lock);
   osi_free((void*)alarm->stats.name);
   osi_free(alarm);
 }
@@ -181,8 +204,9 @@
   period_ms_t remaining_ms = 0;
   period_ms_t just_now = now();
 
-  std::lock_guard<std::mutex> lock(alarms_mutex);
+  pthread_mutex_lock(&monitor);
   if (alarm->deadline > just_now) remaining_ms = alarm->deadline - just_now;
+  pthread_mutex_unlock(&monitor);
 
   return remaining_ms;
 }
@@ -206,7 +230,7 @@
   assert(alarm != NULL);
   assert(cb != NULL);
 
-  std::lock_guard<std::mutex> lock(alarms_mutex);
+  pthread_mutex_lock(&monitor);
 
   alarm->creation_time = now();
   alarm->period = period;
@@ -216,23 +240,25 @@
 
   schedule_next_instance(alarm);
   alarm->stats.scheduled_count++;
+
+  pthread_mutex_unlock(&monitor);
 }
 
 void alarm_cancel(alarm_t* alarm) {
   assert(alarms != NULL);
   if (!alarm) return;
 
-  {
-    std::lock_guard<std::mutex> lock(alarms_mutex);
-    alarm_cancel_internal(alarm);
-  }
+  pthread_mutex_lock(&monitor);
+  alarm_cancel_internal(alarm);
+  pthread_mutex_unlock(&monitor);
 
   // If the callback for |alarm| is in progress, wait here until it completes.
-  std::lock_guard<std::recursive_mutex> lock(*alarm->callback_mutex);
+  pthread_mutex_lock(&alarm->callback_lock);
+  pthread_mutex_unlock(&alarm->callback_lock);
 }
 
 // Internal implementation of canceling an alarm.
-// The caller must hold the |alarms_mutex|
+// The caller must hold the |monitor| lock.
 static void alarm_cancel_internal(alarm_t* alarm) {
   bool needs_reschedule =
       (!list_is_empty(alarms) && list_front(alarms) == alarm);
@@ -263,7 +289,7 @@
   thread_free(dispatcher_thread);
   dispatcher_thread = NULL;
 
-  std::lock_guard<std::mutex> lock(alarms_mutex);
+  pthread_mutex_lock(&monitor);
 
   fixed_queue_free(default_callback_queue, NULL);
   default_callback_queue = NULL;
@@ -277,6 +303,9 @@
 
   list_free(alarms);
   alarms = NULL;
+
+  pthread_mutex_unlock(&monitor);
+  pthread_mutex_destroy(&monitor);
 }
 
 static bool lazy_initialize(void) {
@@ -287,7 +316,7 @@
   bool timer_initialized = false;
   bool wakeup_timer_initialized = false;
 
-  std::lock_guard<std::mutex> lock(alarms_mutex);
+  pthread_mutex_init(&monitor, NULL);
 
   alarms = list_new(NULL);
   if (!alarms) {
@@ -356,6 +385,8 @@
   list_free(alarms);
   alarms = NULL;
 
+  pthread_mutex_destroy(&monitor);
+
   return false;
 }
 
@@ -373,7 +404,7 @@
 }
 
 // Remove alarm from internal alarm list and the processing queue
-// The caller must hold the |alarms_mutex|
+// The caller must hold the |monitor| lock.
 static void remove_pending_alarm(alarm_t* alarm) {
   list_remove(alarms, alarm);
   while (fixed_queue_try_remove_from_queue(alarm->queue, alarm) != NULL) {
@@ -382,7 +413,7 @@
   }
 }
 
-// Must be called with |alarms_mutex| held
+// Must be called with the |monitor| lock held
 static void schedule_next_instance(alarm_t* alarm) {
   // If the alarm is currently set and it's at the start of the list,
   // we'll need to re-schedule since we've adjusted the earliest deadline.
@@ -421,7 +452,7 @@
   }
 }
 
-// NOTE: must be called with |alarms_mutex| held
+// NOTE: must be called with the |monitor| lock held.
 static void reschedule_root_alarm(void) {
   assert(alarms != NULL);
 
@@ -527,7 +558,7 @@
   fixed_queue_unregister_dequeue(queue);
 
   // Cancel all alarms that are using this queue
-  std::lock_guard<std::mutex> lock(alarms_mutex);
+  pthread_mutex_lock(&monitor);
   for (list_node_t* node = list_begin(alarms); node != list_end(alarms);) {
     alarm_t* alarm = (alarm_t*)list_node(node);
     node = list_next(node);
@@ -536,14 +567,16 @@
     // an assert.
     if (alarm->queue == queue) alarm_cancel_internal(alarm);
   }
+  pthread_mutex_unlock(&monitor);
 }
 
 static void alarm_queue_ready(fixed_queue_t* queue, UNUSED_ATTR void* context) {
   assert(queue != NULL);
 
-  std::unique_lock<std::mutex> lock(alarms_mutex);
+  pthread_mutex_lock(&monitor);
   alarm_t* alarm = (alarm_t*)fixed_queue_try_dequeue(queue);
   if (alarm == NULL) {
+    pthread_mutex_unlock(&monitor);
     return;  // The alarm was probably canceled
   }
 
@@ -565,8 +598,8 @@
     alarm->data = NULL;
   }
 
-  std::lock_guard<std::recursive_mutex> cb_lock(*alarm->callback_mutex);
-  lock.unlock();
+  pthread_mutex_lock(&alarm->callback_lock);
+  pthread_mutex_unlock(&monitor);
 
   period_ms_t t0 = now();
   callback(data);
@@ -576,6 +609,8 @@
   assert(t1 >= t0);
   period_ms_t delta = t1 - t0;
   update_scheduling_stats(&alarm->stats, t0, deadline, delta);
+
+  pthread_mutex_unlock(&alarm->callback_lock);
 }
 
 // Callback function for wake alarms and our posix timer
@@ -592,15 +627,17 @@
     semaphore_wait(alarm_expired);
     if (!dispatcher_thread_active) break;
 
-    std::lock_guard<std::mutex> lock(alarms_mutex);
+    pthread_mutex_lock(&monitor);
     alarm_t* alarm;
 
     // Take into account that the alarm may get cancelled before we get to it.
     // We're done here if there are no alarms or the alarm at the front is in
-    // the future. Exit right away since there's nothing left to do.
+    // the future. Release the monitor lock and exit right away since there's
+    // nothing left to do.
     if (list_is_empty(alarms) ||
         (alarm = static_cast<alarm_t*>(list_front(alarms)))->deadline > now()) {
       reschedule_root_alarm();
+      pthread_mutex_unlock(&monitor);
       continue;
     }
 
@@ -615,6 +652,8 @@
 
     // Enqueue the alarm for processing
     fixed_queue_enqueue(alarm->queue, alarm);
+
+    pthread_mutex_unlock(&monitor);
   }
 
   LOG_DEBUG(LOG_TAG, "%s Callback thread exited", __func__);
@@ -677,9 +716,10 @@
 void alarm_debug_dump(int fd) {
   dprintf(fd, "\nBluetooth Alarms Statistics:\n");
 
-  std::lock_guard<std::mutex> lock(alarms_mutex);
+  pthread_mutex_lock(&monitor);
 
   if (alarms == NULL) {
+    pthread_mutex_unlock(&monitor);
     dprintf(fd, "  None\n");
     return;
   }
@@ -723,4 +763,5 @@
 
     dprintf(fd, "\n");
   }
+  pthread_mutex_unlock(&monitor);
 }
diff --git a/osi/src/fixed_queue.cc b/osi/src/fixed_queue.cc
index 954ca38..a4cd08d 100644
--- a/osi/src/fixed_queue.cc
+++ b/osi/src/fixed_queue.cc
@@ -17,10 +17,9 @@
  ******************************************************************************/
 
 #include <assert.h>
+#include <pthread.h>
 #include <string.h>
 
-#include <mutex>
-
 #include "osi/include/allocator.h"
 #include "osi/include/fixed_queue.h"
 #include "osi/include/list.h"
@@ -32,7 +31,7 @@
   list_t* list;
   semaphore_t* enqueue_sem;
   semaphore_t* dequeue_sem;
-  std::mutex *mutex;
+  pthread_mutex_t lock;
   size_t capacity;
 
   reactor_object_t* dequeue_object;
@@ -46,7 +45,7 @@
   fixed_queue_t* ret =
       static_cast<fixed_queue_t*>(osi_calloc(sizeof(fixed_queue_t)));
 
-  ret->mutex = new std::mutex;
+  pthread_mutex_init(&ret->lock, NULL);
   ret->capacity = capacity;
 
   ret->list = list_new(NULL);
@@ -78,7 +77,7 @@
   list_free(queue->list);
   semaphore_free(queue->enqueue_sem);
   semaphore_free(queue->dequeue_sem);
-  delete queue->mutex;
+  pthread_mutex_destroy(&queue->lock);
   osi_free(queue);
 }
 
@@ -96,15 +95,21 @@
 bool fixed_queue_is_empty(fixed_queue_t* queue) {
   if (queue == NULL) return true;
 
-  std::lock_guard<std::mutex> lock(*queue->mutex);
-  return list_is_empty(queue->list);
+  pthread_mutex_lock(&queue->lock);
+  bool is_empty = list_is_empty(queue->list);
+  pthread_mutex_unlock(&queue->lock);
+
+  return is_empty;
 }
 
 size_t fixed_queue_length(fixed_queue_t* queue) {
   if (queue == NULL) return 0;
 
-  std::lock_guard<std::mutex> lock(*queue->mutex);
-  return list_length(queue->list);
+  pthread_mutex_lock(&queue->lock);
+  size_t length = list_length(queue->list);
+  pthread_mutex_unlock(&queue->lock);
+
+  return length;
 }
 
 size_t fixed_queue_capacity(fixed_queue_t* queue) {
@@ -119,10 +124,9 @@
 
   semaphore_wait(queue->enqueue_sem);
 
-  {
-    std::lock_guard<std::mutex> lock(*queue->mutex);
-    list_append(queue->list, data);
-  }
+  pthread_mutex_lock(&queue->lock);
+  list_append(queue->list, data);
+  pthread_mutex_unlock(&queue->lock);
 
   semaphore_post(queue->dequeue_sem);
 }
@@ -132,12 +136,10 @@
 
   semaphore_wait(queue->dequeue_sem);
 
-  void *ret = NULL;
-  {
-    std::lock_guard<std::mutex> lock(*queue->mutex);
-    ret = list_front(queue->list);
-    list_remove(queue->list, ret);
-  }
+  pthread_mutex_lock(&queue->lock);
+  void* ret = list_front(queue->list);
+  list_remove(queue->list, ret);
+  pthread_mutex_unlock(&queue->lock);
 
   semaphore_post(queue->enqueue_sem);
 
@@ -150,10 +152,9 @@
 
   if (!semaphore_try_wait(queue->enqueue_sem)) return false;
 
-  {
-    std::lock_guard<std::mutex> lock(*queue->mutex);
-    list_append(queue->list, data);
-  }
+  pthread_mutex_lock(&queue->lock);
+  list_append(queue->list, data);
+  pthread_mutex_unlock(&queue->lock);
 
   semaphore_post(queue->dequeue_sem);
   return true;
@@ -164,12 +165,10 @@
 
   if (!semaphore_try_wait(queue->dequeue_sem)) return NULL;
 
-  void *ret = NULL;
-  {
-    std::lock_guard<std::mutex> lock(*queue->mutex);
-    ret = list_front(queue->list);
-    list_remove(queue->list, ret);
-  }
+  pthread_mutex_lock(&queue->lock);
+  void* ret = list_front(queue->list);
+  list_remove(queue->list, ret);
+  pthread_mutex_unlock(&queue->lock);
 
   semaphore_post(queue->enqueue_sem);
 
@@ -179,29 +178,34 @@
 void* fixed_queue_try_peek_first(fixed_queue_t* queue) {
   if (queue == NULL) return NULL;
 
-  std::lock_guard<std::mutex> lock(*queue->mutex);
-  return list_is_empty(queue->list) ? NULL : list_front(queue->list);
+  pthread_mutex_lock(&queue->lock);
+  void* ret = list_is_empty(queue->list) ? NULL : list_front(queue->list);
+  pthread_mutex_unlock(&queue->lock);
+
+  return ret;
 }
 
 void* fixed_queue_try_peek_last(fixed_queue_t* queue) {
   if (queue == NULL) return NULL;
 
-  std::lock_guard<std::mutex> lock(*queue->mutex);
-  return list_is_empty(queue->list) ? NULL : list_back(queue->list);
+  pthread_mutex_lock(&queue->lock);
+  void* ret = list_is_empty(queue->list) ? NULL : list_back(queue->list);
+  pthread_mutex_unlock(&queue->lock);
+
+  return ret;
 }
 
 void* fixed_queue_try_remove_from_queue(fixed_queue_t* queue, void* data) {
   if (queue == NULL) return NULL;
 
   bool removed = false;
-  {
-    std::lock_guard<std::mutex> lock(*queue->mutex);
-    if (list_contains(queue->list, data) &&
-        semaphore_try_wait(queue->dequeue_sem)) {
-      removed = list_remove(queue->list, data);
-      assert(removed);
-    }
+  pthread_mutex_lock(&queue->lock);
+  if (list_contains(queue->list, data) &&
+      semaphore_try_wait(queue->dequeue_sem)) {
+    removed = list_remove(queue->list, data);
+    assert(removed);
   }
+  pthread_mutex_unlock(&queue->lock);
 
   if (removed) {
     semaphore_post(queue->enqueue_sem);
@@ -213,8 +217,8 @@
 list_t* fixed_queue_get_list(fixed_queue_t* queue) {
   assert(queue != NULL);
 
-  // NOTE: This function has no chance of being thread safe, so don't worry
-  // about locking the list.
+  // NOTE: This function is not thread safe, so there is no point in
+  // calling pthread_mutex_lock() / pthread_mutex_unlock() here.
   return queue->list;
 }
 
diff --git a/osi/src/mutex.cc b/osi/src/mutex.cc
index fbbef40..4cbb45f 100644
--- a/osi/src/mutex.cc
+++ b/osi/src/mutex.cc
@@ -18,12 +18,21 @@
 
 #define LOG_TAG "bt_osi_mutex"
 
-#include <mutex>
+#include <pthread.h>
 
 #include "osi/include/mutex.h"
 
-static std::recursive_mutex global_mutex;
+static pthread_mutex_t global_lock;
 
-void mutex_global_lock(void) { global_mutex.lock(); }
+void mutex_init(void) {
+  pthread_mutexattr_t attr;
+  pthread_mutexattr_init(&attr);
+  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);
+  pthread_mutex_init(&global_lock, &attr);
+}
 
-void mutex_global_unlock(void) { global_mutex.unlock(); }
+void mutex_cleanup(void) { pthread_mutex_destroy(&global_lock); }
+
+void mutex_global_lock(void) { pthread_mutex_lock(&global_lock); }
+
+void mutex_global_unlock(void) { pthread_mutex_unlock(&global_lock); }
diff --git a/osi/src/reactor.cc b/osi/src/reactor.cc
index 6062665..b77809a 100644
--- a/osi/src/reactor.cc
+++ b/osi/src/reactor.cc
@@ -29,8 +29,6 @@
 #include <sys/eventfd.h>
 #include <unistd.h>
 
-#include <mutex>
-
 #include "osi/include/allocator.h"
 #include "osi/include/list.h"
 #include "osi/include/log.h"
@@ -42,7 +40,7 @@
 struct reactor_t {
   int epoll_fd;
   int event_fd;
-  std::mutex *list_mutex;
+  pthread_mutex_t list_lock;  // protects invalidation_list.
   list_t* invalidation_list;  // reactor objects that have been unregistered.
   pthread_t run_thread;       // the pthread on which reactor_run is executing.
   bool is_running;            // indicates whether |run_thread| is valid.
@@ -53,7 +51,8 @@
   int fd;              // the file descriptor to monitor for events.
   void* context;       // a context that's passed back to the *_ready functions.
   reactor_t* reactor;  // the reactor instance this object is registered with.
-  std::mutex *mutex;   // protects the lifetime of this object and all variables.
+  pthread_mutex_t
+      lock;  // protects the lifetime of this object and all variables.
 
   void (*read_ready)(void* context);   // function to call when the file
                                        // descriptor becomes readable.
@@ -86,7 +85,7 @@
     goto error;
   }
 
-  ret->list_mutex = new std::mutex;
+  pthread_mutex_init(&ret->list_lock, NULL);
   ret->invalidation_list = list_new(NULL);
   if (!ret->invalidation_list) {
     LOG_ERROR(LOG_TAG, "%s unable to allocate object invalidation list.",
@@ -150,7 +149,7 @@
   object->context = context;
   object->read_ready = read_ready;
   object->write_ready = write_ready;
-  object->mutex = new std::mutex;
+  pthread_mutex_init(&object->lock, NULL);
 
   struct epoll_event event;
   memset(&event, 0, sizeof(event));
@@ -161,7 +160,7 @@
   if (epoll_ctl(reactor->epoll_fd, EPOLL_CTL_ADD, fd, &event) == -1) {
     LOG_ERROR(LOG_TAG, "%s unable to register fd %d to epoll set: %s", __func__,
               fd, strerror(errno));
-    delete object->mutex;
+    pthread_mutex_destroy(&object->lock);
     osi_free(object);
     return NULL;
   }
@@ -187,9 +186,10 @@
     return false;
   }
 
-  std::lock_guard<std::mutex> lock(*object->mutex);
+  pthread_mutex_lock(&object->lock);
   object->read_ready = read_ready;
   object->write_ready = write_ready;
+  pthread_mutex_unlock(&object->lock);
 
   return true;
 }
@@ -209,10 +209,9 @@
     return;
   }
 
-  {
-    std::unique_lock<std::mutex> lock(*reactor->list_mutex);
-    list_append(reactor->invalidation_list, obj);
-  }
+  pthread_mutex_lock(&reactor->list_lock);
+  list_append(reactor->invalidation_list, obj);
+  pthread_mutex_unlock(&reactor->list_lock);
 
   // Taking the object lock here makes sure a callback for |obj| isn't
   // currently executing. The reactor thread must then either be before
@@ -222,9 +221,9 @@
   // invalidation_list and find it in there. So by taking this lock, we
   // are waiting until the reactor thread drops all references to |obj|.
   // Once the wait completes, we can unlock and destroy |obj| safely.
-  obj->mutex->lock();
-  obj->mutex->unlock();
-  delete obj->mutex;
+  pthread_mutex_lock(&obj->lock);
+  pthread_mutex_unlock(&obj->lock);
+  pthread_mutex_destroy(&obj->lock);
   osi_free(obj);
 }
 
@@ -239,10 +238,9 @@
 
   struct epoll_event events[MAX_EVENTS];
   for (int i = 0; iterations == 0 || i < iterations; ++i) {
-    {
-      std::lock_guard<std::mutex> lock(*reactor->list_mutex);
-      list_clear(reactor->invalidation_list);
-    }
+    pthread_mutex_lock(&reactor->list_lock);
+    list_clear(reactor->invalidation_list);
+    pthread_mutex_unlock(&reactor->list_lock);
 
     int ret;
     OSI_NO_INTR(ret = epoll_wait(reactor->epoll_fd, events, MAX_EVENTS, -1));
@@ -266,27 +264,27 @@
 
       reactor_object_t* object = (reactor_object_t*)events[j].data.ptr;
 
-      std::unique_lock<std::mutex> lock(*reactor->list_mutex);
+      pthread_mutex_lock(&reactor->list_lock);
       if (list_contains(reactor->invalidation_list, object)) {
+        pthread_mutex_unlock(&reactor->list_lock);
         continue;
       }
 
       // Downgrade the list lock to an object lock.
-      {
-        std::lock_guard<std::mutex> obj_lock(*object->mutex);
-        lock.unlock();
+      pthread_mutex_lock(&object->lock);
+      pthread_mutex_unlock(&reactor->list_lock);
 
-        reactor->object_removed = false;
-        if (events[j].events & (EPOLLIN | EPOLLHUP | EPOLLRDHUP | EPOLLERR) &&
-            object->read_ready)
-          object->read_ready(object->context);
-        if (!reactor->object_removed && events[j].events & EPOLLOUT &&
-            object->write_ready)
-          object->write_ready(object->context);
-      }
+      reactor->object_removed = false;
+      if (events[j].events & (EPOLLIN | EPOLLHUP | EPOLLRDHUP | EPOLLERR) &&
+          object->read_ready)
+        object->read_ready(object->context);
+      if (!reactor->object_removed && events[j].events & EPOLLOUT &&
+          object->write_ready)
+        object->write_ready(object->context);
+      pthread_mutex_unlock(&object->lock);
 
       if (reactor->object_removed) {
-        delete object->mutex;
+        pthread_mutex_destroy(&object->lock);
         osi_free(object);
       }
     }
diff --git a/osi/src/wakelock.cc b/osi/src/wakelock.cc
index 8e1a739..777aa8c 100644
--- a/osi/src/wakelock.cc
+++ b/osi/src/wakelock.cc
@@ -31,7 +31,6 @@
 #include <time.h>
 #include <unistd.h>
 
-#include <mutex>
 #include <string>
 
 #include "osi/include/alarm.h"
@@ -78,7 +77,7 @@
 
 // This mutex ensures that the functions that update and dump the statistics
 // are executed serially.
-static std::mutex stats_mutex;
+static pthread_mutex_t monitor;
 
 static bt_status_t wakelock_acquire_callout(void);
 static bt_status_t wakelock_acquire_native(void);
@@ -183,6 +182,7 @@
 }
 
 static void wakelock_initialize(void) {
+  pthread_mutex_init(&monitor, NULL);
   reset_wakelock_stats();
 
   if (is_native) wakelock_initialize_native();
@@ -212,6 +212,7 @@
   wake_lock_path.clear();
   wake_unlock_path.clear();
   initialized = PTHREAD_ONCE_INIT;
+  pthread_mutex_destroy(&monitor);
 }
 
 void wakelock_set_paths(const char* lock_path, const char* unlock_path) {
@@ -234,7 +235,7 @@
 // Reset the Bluetooth wakelock statistics.
 // This function is thread-safe.
 static void reset_wakelock_stats(void) {
-  std::lock_guard<std::mutex> lock(stats_mutex);
+  pthread_mutex_lock(&monitor);
 
   wakelock_stats.is_acquired = false;
   wakelock_stats.acquired_count = 0;
@@ -248,6 +249,8 @@
   wakelock_stats.last_acquired_timestamp_ms = 0;
   wakelock_stats.last_released_timestamp_ms = 0;
   wakelock_stats.last_reset_timestamp_ms = now();
+
+  pthread_mutex_unlock(&monitor);
 }
 
 //
@@ -261,7 +264,7 @@
 static void update_wakelock_acquired_stats(bt_status_t acquired_status) {
   const period_ms_t now_ms = now();
 
-  std::lock_guard<std::mutex> lock(stats_mutex);
+  pthread_mutex_lock(&monitor);
 
   if (acquired_status != BT_STATUS_SUCCESS) {
     wakelock_stats.acquired_errors++;
@@ -269,6 +272,7 @@
   }
 
   if (wakelock_stats.is_acquired) {
+    pthread_mutex_unlock(&monitor);
     return;
   }
 
@@ -276,6 +280,8 @@
   wakelock_stats.acquired_count++;
   wakelock_stats.last_acquired_timestamp_ms = now_ms;
 
+  pthread_mutex_unlock(&monitor);
+
   metrics_wake_event(WAKE_EVENT_ACQUIRED, NULL, WAKE_LOCK_ID, now_ms);
 }
 
@@ -290,7 +296,7 @@
 static void update_wakelock_released_stats(bt_status_t released_status) {
   const period_ms_t now_ms = now();
 
-  std::lock_guard<std::mutex> lock(stats_mutex);
+  pthread_mutex_lock(&monitor);
 
   if (released_status != BT_STATUS_SUCCESS) {
     wakelock_stats.released_errors++;
@@ -298,6 +304,7 @@
   }
 
   if (!wakelock_stats.is_acquired) {
+    pthread_mutex_unlock(&monitor);
     return;
   }
 
@@ -317,13 +324,17 @@
   wakelock_stats.last_acquired_interval_ms = delta_ms;
   wakelock_stats.total_acquired_interval_ms += delta_ms;
 
+  pthread_mutex_unlock(&monitor);
+
   metrics_wake_event(WAKE_EVENT_RELEASED, NULL, WAKE_LOCK_ID, now_ms);
 }
 
 void wakelock_debug_dump(int fd) {
   const period_ms_t now_ms = now();
 
-  std::lock_guard<std::mutex> lock(stats_mutex);
+  // Keep track of lock errors - e.g., the "monitor" mutex might not
+  // be initialized yet, in which case it must not be unlocked below.
+  const int lock_error = pthread_mutex_lock(&monitor);
 
   // Compute the last acquired interval if the wakelock is still acquired
   period_ms_t delta_ms = 0;
@@ -364,4 +375,6 @@
   dprintf(
       fd, "  Total run time (ms)            : %llu\n",
       (unsigned long long)(now_ms - wakelock_stats.last_reset_timestamp_ms));
+
+  if (lock_error == 0) pthread_mutex_unlock(&monitor);
 }
diff --git a/udrv/ulinux/uipc.cc b/udrv/ulinux/uipc.cc
index dd873d8..898d839 100644
--- a/udrv/ulinux/uipc.cc
+++ b/udrv/ulinux/uipc.cc
@@ -20,13 +20,12 @@
  *
  *  Filename:      uipc.cc
  *
- *  Description:   UIPC implementation for fluoride
+ *  Description:   UIPC implementation for bluedroid
  *
  *****************************************************************************/
 
 #include <errno.h>
 #include <fcntl.h>
-#include <mutex>
 #include <pthread.h>
 #include <signal.h>
 #include <stdio.h>
@@ -61,6 +60,9 @@
 
 #define UIPC_DISCONNECTED (-1)
 
+#define UIPC_LOCK() /*BTIF_TRACE_EVENT(" %s lock", __func__);*/ pthread_mutex_lock(&uipc_main.mutex);
+#define UIPC_UNLOCK() /*BTIF_TRACE_EVENT("%s unlock", __func__);*/ pthread_mutex_unlock(&uipc_main.mutex);
+
 #define SAFE_FD_ISSET(fd, set) (((fd) == -1) ? false : FD_ISSET((fd), (set)))
 
 #define UIPC_FLUSH_BUFFER_SIZE 1024
@@ -78,13 +80,16 @@
     int fd;
     int read_poll_tmo_ms;
     int task_evt_flags;   /* event flags pending to be processed in read task */
+    tUIPC_EVENT cond_flags;
+    pthread_mutex_t cond_mutex;
+    pthread_cond_t  cond;
     tUIPC_RCV_CBACK *cback;
 } tUIPC_CHAN;
 
 typedef struct {
     pthread_t tid; /* main thread id */
     int running;
-    std::recursive_mutex mutex;
+    pthread_mutex_t mutex;
 
     fd_set active_set;
     fd_set read_set;
@@ -218,6 +223,10 @@
 static int uipc_main_init(void)
 {
     int i;
+    pthread_mutexattr_t attr;
+    pthread_mutexattr_init(&attr);
+    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+    pthread_mutex_init(&uipc_main.mutex, &attr);
 
     BTIF_TRACE_EVENT("### uipc_main_init ###");
 
@@ -236,6 +245,8 @@
         p->srvfd = UIPC_DISCONNECTED;
         p->fd = UIPC_DISCONNECTED;
         p->task_evt_flags = 0;
+        pthread_cond_init(&p->cond, NULL);
+        pthread_mutex_init(&p->cond_mutex, NULL);
         p->cback = NULL;
     }
 
@@ -265,6 +276,7 @@
 
     for (i=0; i<UIPC_CH_NUM; i++)
     {
+        //BTIF_TRACE_EVENT("CHECK TASK FLAGS %x %x",  uipc_main.ch[i].task_evt_flags, UIPC_TASK_FLAG_DISCONNECT_CHAN);
         if (uipc_main.ch[i].task_evt_flags & UIPC_TASK_FLAG_DISCONNECT_CHAN)
         {
             uipc_main.ch[i].task_evt_flags &= ~UIPC_TASK_FLAG_DISCONNECT_CHAN;
@@ -294,8 +306,8 @@
 
         if ((uipc_main.ch[ch_id].fd > 0) && uipc_main.ch[ch_id].cback)
         {
-            /* if we have a callback we should add this fd to the active set
-               and notify user with callback event */
+            /*  if we have a callback we should add this fd to the active set
+                and notify user with callback event */
             BTIF_TRACE_EVENT("ADD FD %d TO ACTIVE SET", uipc_main.ch[ch_id].fd);
             FD_SET(uipc_main.ch[ch_id].fd, &uipc_main.active_set);
             uipc_main.max_fd = MAX(uipc_main.max_fd, uipc_main.ch[ch_id].fd);
@@ -351,13 +363,15 @@
     if (ch_id >= UIPC_CH_NUM)
         return -1;
 
-    std::lock_guard<std::recursive_mutex> guard(uipc_main.mutex);
+    UIPC_LOCK();
 
     fd = create_server_socket(name);
 
-    if (fd < 0) {
-      BTIF_TRACE_ERROR("failed to setup %s", name, strerror(errno));
-      return -1;
+    if (fd < 0)
+    {
+        BTIF_TRACE_ERROR("failed to setup %s", name, strerror(errno));
+        UIPC_UNLOCK();
+        return -1;
     }
 
     BTIF_TRACE_EVENT("ADD SERVER FD TO ACTIVE SET %d", fd);
@@ -371,6 +385,8 @@
     /* trigger main thread to update read set */
     uipc_wakeup_locked();
 
+    UIPC_UNLOCK();
+
     return 0;
 }
 
@@ -513,7 +529,7 @@
             continue;
         }
 
-        std::lock_guard<std::recursive_mutex> guard(uipc_main.mutex);
+        UIPC_LOCK();
 
         /* clear any wakeup interrupt */
         uipc_check_interrupt_locked();
@@ -530,6 +546,8 @@
             if (ch_id != UIPC_CH_ID_AV_AUDIO)
                 uipc_check_fd_locked(ch_id);
         }
+
+        UIPC_UNLOCK();
     }
 
     BTIF_TRACE_EVENT("UIPC READ THREAD EXITING");
@@ -562,11 +580,10 @@
 void uipc_stop_main_server_thread(void)
 {
     /* request shutdown of read thread */
-    {
-      std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex);
-      uipc_main.running = 0;
-      uipc_wakeup_locked();
-    }
+    UIPC_LOCK();
+    uipc_main.running = 0;
+    uipc_wakeup_locked();
+    UIPC_UNLOCK();
 
     /* wait until read thread is fully terminated */
     /* tid might hold pointer value where it's value
@@ -610,15 +627,18 @@
 {
     BTIF_TRACE_DEBUG("UIPC_Open : ch_id %d, p_cback %x", ch_id, p_cback);
 
-    std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex);
+    UIPC_LOCK();
 
-    if (ch_id >= UIPC_CH_NUM) {
+    if (ch_id >= UIPC_CH_NUM)
+    {
+        UIPC_UNLOCK();
         return false;
     }
 
     if (uipc_main.ch[ch_id].srvfd != UIPC_DISCONNECTED)
     {
         BTIF_TRACE_EVENT("CHANNEL %d ALREADY OPEN", ch_id);
+        UIPC_UNLOCK();
         return 0;
     }
 
@@ -633,6 +653,8 @@
             break;
     }
 
+    UIPC_UNLOCK();
+
     return true;
 }
 
@@ -651,10 +673,14 @@
     BTIF_TRACE_DEBUG("UIPC_Close : ch_id %d", ch_id);
 
     /* special case handling uipc shutdown */
-    if (ch_id != UIPC_CH_ID_ALL) {
-        std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex);
+    if (ch_id != UIPC_CH_ID_ALL)
+    {
+        UIPC_LOCK();
         uipc_close_locked(ch_id);
-    } else {
+        UIPC_UNLOCK();
+    }
+    else
+    {
         BTIF_TRACE_DEBUG("UIPC_Close : waiting for shutdown to complete");
         uipc_stop_main_server_thread();
         BTIF_TRACE_DEBUG("UIPC_Close : shutdown complete");
@@ -676,7 +702,7 @@
 {
     BTIF_TRACE_DEBUG("UIPC_Send : ch_id:%d %d bytes", ch_id, msglen);
 
-    std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex);
+    UIPC_LOCK();
 
     ssize_t ret;
     OSI_NO_INTR(ret = write(uipc_main.ch[ch_id].fd, p_buf, msglen));
@@ -684,6 +710,8 @@
         BTIF_TRACE_ERROR("failed to write (%s)", strerror(errno));
     }
 
+    UIPC_UNLOCK();
+
     return false;
 }
 
@@ -747,8 +775,9 @@
         if (pfd.revents & (POLLHUP|POLLNVAL) )
         {
             BTIF_TRACE_WARNING("poll : channel detached remotely");
-            std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex);
+            UIPC_LOCK();
             uipc_close_locked(ch_id);
+            UIPC_UNLOCK();
             return 0;
         }
 
@@ -760,8 +789,9 @@
         if (n == 0)
         {
             BTIF_TRACE_WARNING("UIPC_Read : channel detached remotely");
-            std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex);
+            UIPC_LOCK();
             uipc_close_locked(ch_id);
+            UIPC_UNLOCK();
             return 0;
         }
 
@@ -792,7 +822,7 @@
 {
     BTIF_TRACE_DEBUG("#### UIPC_Ioctl : ch_id %d, request %d ####", ch_id, request);
 
-    std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex);
+    UIPC_LOCK();
 
     switch(request)
     {
@@ -828,6 +858,8 @@
             break;
     }
 
+    UIPC_UNLOCK();
+
     return false;
 }
 
diff --git a/utils/src/bt_utils.cc b/utils/src/bt_utils.cc
index 4fbb574..a524edf 100644
--- a/utils/src/bt_utils.cc
+++ b/utils/src/bt_utils.cc
@@ -30,7 +30,6 @@
 #include "bt_utils.h"
 
 #include <errno.h>
-#include <mutex>
 #include <pthread.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -56,14 +55,14 @@
 ********************************************************************************/
 static pthread_once_t g_DoSchedulingGroupOnce[TASK_HIGH_MAX];
 static bool    g_DoSchedulingGroup[TASK_HIGH_MAX];
-static std::mutex gIdxLock;
+static pthread_mutex_t         gIdxLock;
 static int g_TaskIdx;
 static int g_TaskIDs[TASK_HIGH_MAX];
 #define INVALID_TASK_ID  (-1)
 
-
 static future_t *init(void) {
   int i;
+  pthread_mutexattr_t lock_attr;
 
   for(i = 0; i < TASK_HIGH_MAX; i++) {
     g_DoSchedulingGroupOnce[i] = PTHREAD_ONCE_INIT;
@@ -71,10 +70,13 @@
     g_TaskIDs[i] = INVALID_TASK_ID;
   }
 
+  pthread_mutexattr_init(&lock_attr);
+  pthread_mutex_init(&gIdxLock, &lock_attr);
   return NULL;
 }
 
 static future_t *clean_up(void) {
+  pthread_mutex_destroy(&gIdxLock);
   return NULL;
 }
 
@@ -123,24 +125,23 @@
     int tid = gettid();
     int priority = ANDROID_PRIORITY_AUDIO;
 
-    {
-      std::lock_guard<std::mutex> lock(gIdxLock);
-      g_TaskIdx = high_task;
+    pthread_mutex_lock(&gIdxLock);
+    g_TaskIdx = high_task;
 
-      // TODO(armansito): Remove this conditional check once we find a solution
-      // for system/core on non-Android platforms.
+    // TODO(armansito): Remove this conditional check once we find a solution
+    // for system/core on non-Android platforms.
 #if defined(OS_GENERIC)
-      rc = -1;
+    rc = -1;
 #else  // !defined(OS_GENERIC)
-      pthread_once(&g_DoSchedulingGroupOnce[g_TaskIdx], check_do_scheduling_group);
-      if (g_DoSchedulingGroup[g_TaskIdx]) {
-          // set_sched_policy does not support tid == 0
-          rc = set_sched_policy(tid, SP_AUDIO_SYS);
-      }
+    pthread_once(&g_DoSchedulingGroupOnce[g_TaskIdx], check_do_scheduling_group);
+    if (g_DoSchedulingGroup[g_TaskIdx]) {
+        // set_sched_policy does not support tid == 0
+        rc = set_sched_policy(tid, SP_AUDIO_SYS);
+    }
 #endif  // defined(OS_GENERIC)
 
-      g_TaskIDs[high_task] = tid;
-    }
+    g_TaskIDs[high_task] = tid;
+    pthread_mutex_unlock(&gIdxLock);
 
     if (rc) {
         LOG_WARN(LOG_TAG, "failed to change sched policy, tid %d, err: %d", tid, errno);