Format files that use LOG macros.

In order to keep the upcoming CL that switches to RTC_-prefixed LOG
macros clean, this CL runs `git cl format --full` on the files that
use LOG macros in the following directories:
- modules/audio_device
- modules/media_file
- modules/video_capture

This CL has been automatically generated with:

for m in PLOG \
  LOG_TAG \
  LOG_GLEM \
  LOG_GLE_EX \
  LOG_GLE \
  LAST_SYSTEM_ERROR \
  LOG_ERRNO_EX \
  LOG_ERRNO \
  LOG_ERR_EX \
  LOG_ERR \
  LOG_V \
  LOG_F \
  LOG_T_F \
  LOG_E \
  LOG_T \
  LOG_CHECK_LEVEL_V \
  LOG_CHECK_LEVEL \
  LOG
do
  for d in media_file video_capture audio_device; do
    cd modules/$d
    # Append a trailing space to the first line of every file that uses
    # the macro, so "git cl format --full" treats the whole file as
    # modified and reformats it.
    git grep -l $m | grep -E "\.(cc|h|m|mm)$" | xargs sed -i "1 s/$/ /"
    cd ../..
  done
done
git cl format --full
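
Once everything is formatted, the follow-up CL only has to do the
mechanical macro rename. A minimal sketch of what that rename could
look like (hypothetical; the actual follow-up may use a different
script, and `\b` is GNU sed syntax):

for d in media_file video_capture audio_device; do
  # Rewrite bare LOG(...) calls to RTC_LOG(...). The \b word boundary
  # keeps identifiers such as ALOGD or an existing RTC_LOG untouched;
  # the other macros in the list above would get the same treatment.
  git grep -l "LOG(" -- "modules/$d" | grep -E "\.(cc|h|m|mm)$" \
    | xargs sed -i "s/\bLOG(/RTC_LOG(/g"
done
git cl format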

Bug: webrtc:8452
Change-Id: I2858b6928e6bd79957f2e5e0b07028eb68a304b2
Reviewed-on: https://webrtc-review.googlesource.com/21322
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Reviewed-by: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20613}
diff --git a/modules/audio_device/android/audio_device_template.h b/modules/audio_device/android/audio_device_template.h
index 5be3a9e..04ff1bc 100644
--- a/modules/audio_device/android/audio_device_template.h
+++ b/modules/audio_device/android/audio_device_template.h
@@ -100,18 +100,16 @@
     return 1;
   }
 
-  int32_t PlayoutDeviceName(
-      uint16_t index,
-      char name[kAdmMaxDeviceNameSize],
-      char guid[kAdmMaxGuidSize]) override {
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override {
     FATAL() << "Should never be called";
     return -1;
   }
 
-  int32_t RecordingDeviceName(
-      uint16_t index,
-      char name[kAdmMaxDeviceNameSize],
-      char guid[kAdmMaxGuidSize]) override {
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override {
     FATAL() << "Should never be called";
     return -1;
   }
@@ -215,9 +213,7 @@
     return err;
   }
 
-  bool Recording() const override {
-    return input_.Recording() ;
-  }
+  bool Recording() const override { return input_.Recording(); }
 
   int32_t SetAGC(bool enable) override {
     if (enable) {
@@ -276,7 +272,7 @@
     return output_.MinSpeakerVolume(minVolume);
   }
 
-  int32_t MicrophoneVolumeIsAvailable(bool& available) override{
+  int32_t MicrophoneVolumeIsAvailable(bool& available) override {
     available = false;
     return -1;
   }
diff --git a/modules/audio_device/android/audio_manager.cc b/modules/audio_device/android/audio_manager.cc
index 9e38a85..6f385a3 100644
--- a/modules/audio_device/android/audio_manager.cc
+++ b/modules/audio_device/android/audio_manager.cc
@@ -107,9 +107,9 @@
   // that the user explicitly selects the high-latency audio path, hence we use
   // the selected |audio_layer| here to set the delay estimate.
   delay_estimate_in_milliseconds_ =
-      (audio_layer == AudioDeviceModule::kAndroidJavaAudio) ?
-      kHighLatencyModeDelayEstimateInMilliseconds :
-      kLowLatencyModeDelayEstimateInMilliseconds;
+      (audio_layer == AudioDeviceModule::kAndroidJavaAudio)
+          ? kHighLatencyModeDelayEstimateInMilliseconds
+          : kLowLatencyModeDelayEstimateInMilliseconds;
   ALOGD("delay_estimate_in_milliseconds: %d", delay_estimate_in_milliseconds_);
 }
 
@@ -201,8 +201,9 @@
   ALOGD("IsLowLatencyPlayoutSupported()");
   // Some devices are blacklisted for usage of OpenSL ES even if they report
   // that low-latency playout is supported. See b/21485703 for details.
-  return j_audio_manager_->IsDeviceBlacklistedForOpenSLESUsage() ?
-      false : low_latency_playout_;
+  return j_audio_manager_->IsDeviceBlacklistedForOpenSLESUsage()
+             ? false
+             : low_latency_playout_;
 }
 
 bool AudioManager::IsLowLatencyRecordSupported() const {
diff --git a/modules/audio_device/android/audio_record_jni.cc b/modules/audio_device/android/audio_record_jni.cc
index b437644..79f8c8b 100644
--- a/modules/audio_device/android/audio_record_jni.cc
+++ b/modules/audio_device/android/audio_record_jni.cc
@@ -41,8 +41,8 @@
 
 AudioRecordJni::JavaAudioRecord::~JavaAudioRecord() {}
 
-int AudioRecordJni::JavaAudioRecord::InitRecording(
-    int sample_rate, size_t channels) {
+int AudioRecordJni::JavaAudioRecord::InitRecording(int sample_rate,
+                                                   size_t channels) {
   return audio_record_->CallIntMethod(init_recording_,
                                       static_cast<jint>(sample_rate),
                                       static_cast<jint>(channels));
@@ -83,10 +83,10 @@
   RTC_CHECK(j_environment_);
   JNINativeMethod native_methods[] = {
       {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
-      reinterpret_cast<void*>(
-          &webrtc::AudioRecordJni::CacheDirectBufferAddress)},
+       reinterpret_cast<void*>(
+           &webrtc::AudioRecordJni::CacheDirectBufferAddress)},
       {"nativeDataIsRecorded", "(IJ)V",
-      reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
+       reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
   j_native_registration_ = j_environment_->RegisterNatives(
       "org/webrtc/voiceengine/WebRtcAudioRecord", native_methods,
       arraysize(native_methods));
@@ -168,7 +168,7 @@
   thread_checker_java_.DetachFromThread();
   initialized_ = false;
   recording_ = false;
-  direct_buffer_address_= nullptr;
+  direct_buffer_address_ = nullptr;
   return 0;
 }
 
@@ -206,29 +206,32 @@
   return j_audio_record_->EnableBuiltInNS(enable) ? 0 : -1;
 }
 
-void JNICALL AudioRecordJni::CacheDirectBufferAddress(
-    JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioRecord) {
+void JNICALL AudioRecordJni::CacheDirectBufferAddress(JNIEnv* env,
+                                                      jobject obj,
+                                                      jobject byte_buffer,
+                                                      jlong nativeAudioRecord) {
   webrtc::AudioRecordJni* this_object =
-      reinterpret_cast<webrtc::AudioRecordJni*> (nativeAudioRecord);
+      reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
   this_object->OnCacheDirectBufferAddress(env, byte_buffer);
 }
 
-void AudioRecordJni::OnCacheDirectBufferAddress(
-    JNIEnv* env, jobject byte_buffer) {
+void AudioRecordJni::OnCacheDirectBufferAddress(JNIEnv* env,
+                                                jobject byte_buffer) {
   ALOGD("OnCacheDirectBufferAddress");
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   RTC_DCHECK(!direct_buffer_address_);
-  direct_buffer_address_ =
-      env->GetDirectBufferAddress(byte_buffer);
+  direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
   jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
   ALOGD("direct buffer capacity: %lld", capacity);
   direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
 }
 
-void JNICALL AudioRecordJni::DataIsRecorded(
-  JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord) {
+void JNICALL AudioRecordJni::DataIsRecorded(JNIEnv* env,
+                                            jobject obj,
+                                            jint length,
+                                            jlong nativeAudioRecord) {
   webrtc::AudioRecordJni* this_object =
-      reinterpret_cast<webrtc::AudioRecordJni*> (nativeAudioRecord);
+      reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
   this_object->OnDataIsRecorded(length);
 }
 
diff --git a/modules/audio_device/android/audio_track_jni.cc b/modules/audio_device/android/audio_track_jni.cc
index 45e59c4..89d4af0 100644
--- a/modules/audio_device/android/audio_track_jni.cc
+++ b/modules/audio_device/android/audio_track_jni.cc
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "modules/audio_device/android/audio_manager.h"
 #include "modules/audio_device/android/audio_track_jni.h"
+#include "modules/audio_device/android/audio_manager.h"
 
 #include <utility>
 
@@ -82,10 +82,10 @@
   RTC_CHECK(j_environment_);
   JNINativeMethod native_methods[] = {
       {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
-      reinterpret_cast<void*>(
-          &webrtc::AudioTrackJni::CacheDirectBufferAddress)},
+       reinterpret_cast<void*>(
+           &webrtc::AudioTrackJni::CacheDirectBufferAddress)},
       {"nativeGetPlayoutData", "(IJ)V",
-      reinterpret_cast<void*>(&webrtc::AudioTrackJni::GetPlayoutData)}};
+       reinterpret_cast<void*>(&webrtc::AudioTrackJni::GetPlayoutData)}};
   j_native_registration_ = j_environment_->RegisterNatives(
       "org/webrtc/voiceengine/WebRtcAudioTrack", native_methods,
       arraysize(native_methods));
@@ -122,8 +122,8 @@
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   RTC_DCHECK(!initialized_);
   RTC_DCHECK(!playing_);
-  if (!j_audio_track_->InitPlayout(
-      audio_parameters_.sample_rate(), audio_parameters_.channels())) {
+  if (!j_audio_track_->InitPlayout(audio_parameters_.sample_rate(),
+                                   audio_parameters_.channels())) {
     ALOGE("InitPlayout failed!");
     return -1;
   }
@@ -209,20 +209,21 @@
   audio_device_buffer_->SetPlayoutChannels(channels);
 }
 
-void JNICALL AudioTrackJni::CacheDirectBufferAddress(
-    JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioTrack) {
+void JNICALL AudioTrackJni::CacheDirectBufferAddress(JNIEnv* env,
+                                                     jobject obj,
+                                                     jobject byte_buffer,
+                                                     jlong nativeAudioTrack) {
   webrtc::AudioTrackJni* this_object =
-      reinterpret_cast<webrtc::AudioTrackJni*> (nativeAudioTrack);
+      reinterpret_cast<webrtc::AudioTrackJni*>(nativeAudioTrack);
   this_object->OnCacheDirectBufferAddress(env, byte_buffer);
 }
 
-void AudioTrackJni::OnCacheDirectBufferAddress(
-    JNIEnv* env, jobject byte_buffer) {
+void AudioTrackJni::OnCacheDirectBufferAddress(JNIEnv* env,
+                                               jobject byte_buffer) {
   ALOGD("OnCacheDirectBufferAddress");
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   RTC_DCHECK(!direct_buffer_address_);
-  direct_buffer_address_ =
-      env->GetDirectBufferAddress(byte_buffer);
+  direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
   jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
   ALOGD("direct buffer capacity: %lld", capacity);
   direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
@@ -231,10 +232,12 @@
   ALOGD("frames_per_buffer: %" PRIuS, frames_per_buffer_);
 }
 
-void JNICALL AudioTrackJni::GetPlayoutData(
-  JNIEnv* env, jobject obj, jint length, jlong nativeAudioTrack) {
+void JNICALL AudioTrackJni::GetPlayoutData(JNIEnv* env,
+                                           jobject obj,
+                                           jint length,
+                                           jlong nativeAudioTrack) {
   webrtc::AudioTrackJni* this_object =
-      reinterpret_cast<webrtc::AudioTrackJni*> (nativeAudioTrack);
+      reinterpret_cast<webrtc::AudioTrackJni*>(nativeAudioTrack);
   this_object->OnGetPlayoutData(static_cast<size_t>(length));
 }
 
diff --git a/modules/audio_device/android/opensles_player.cc b/modules/audio_device/android/opensles_player.cc
index 1530741..7ac6912 100644
--- a/modules/audio_device/android/opensles_player.cc
+++ b/modules/audio_device/android/opensles_player.cc
@@ -289,10 +289,10 @@
   SLDataSink audio_sink = {&locator_output_mix, nullptr};
 
   // Define interfaces that we indend to use and realize.
-  const SLInterfaceID interface_ids[] = {
-      SL_IID_ANDROIDCONFIGURATION, SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
-  const SLboolean interface_required[] = {
-      SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
+  const SLInterfaceID interface_ids[] = {SL_IID_ANDROIDCONFIGURATION,
+                                         SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
+  const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
+                                          SL_BOOLEAN_TRUE};
 
   // Create the audio player on the engine interface.
   RETURN_ON_ERROR(
diff --git a/modules/audio_device/audio_device_buffer.h b/modules/audio_device/audio_device_buffer.h
index a68bbdf..8b8e907 100644
--- a/modules/audio_device/audio_device_buffer.h
+++ b/modules/audio_device/audio_device_buffer.h
@@ -197,7 +197,7 @@
   // dynamically.
   rtc::BufferT<int16_t> rec_buffer_ RTC_ACCESS_ON(recording_thread_checker_);
 
-  // AGC parameters.
+// AGC parameters.
 #if !defined(WEBRTC_WIN)
   uint32_t current_mic_level_ RTC_ACCESS_ON(recording_thread_checker_);
 #else
diff --git a/modules/audio_device/audio_device_impl.cc b/modules/audio_device/audio_device_impl.cc
index 12caa2e..0a669ca 100644
--- a/modules/audio_device/audio_device_impl.cc
+++ b/modules/audio_device/audio_device_impl.cc
@@ -386,7 +386,7 @@
 int32_t AudioDeviceModuleImpl::SpeakerMuteIsAvailable(bool* available) {
   LOG(INFO) << __FUNCTION__;
   CHECKinitialized_();
-  bool isAvailable  = false;
+  bool isAvailable = false;
   if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) {
     return -1;
   }
diff --git a/modules/audio_device/dummy/file_audio_device.cc b/modules/audio_device/dummy/file_audio_device.cc
index 6b0ee04..6954762 100644
--- a/modules/audio_device/dummy/file_audio_device.cc
+++ b/modules/audio_device/dummy/file_audio_device.cc
@@ -26,24 +26,23 @@
     kRecordingFixedSampleRate / 100 * kRecordingNumChannels * 2;
 
 FileAudioDevice::FileAudioDevice(const char* inputFilename,
-                                 const char* outputFilename):
-    _ptrAudioBuffer(NULL),
-    _recordingBuffer(NULL),
-    _playoutBuffer(NULL),
-    _recordingFramesLeft(0),
-    _playoutFramesLeft(0),
-    _recordingBufferSizeIn10MS(0),
-    _recordingFramesIn10MS(0),
-    _playoutFramesIn10MS(0),
-    _playing(false),
-    _recording(false),
-    _lastCallPlayoutMillis(0),
-    _lastCallRecordMillis(0),
-    _outputFile(*FileWrapper::Create()),
-    _inputFile(*FileWrapper::Create()),
-    _outputFilename(outputFilename),
-    _inputFilename(inputFilename) {
-}
+                                 const char* outputFilename)
+    : _ptrAudioBuffer(NULL),
+      _recordingBuffer(NULL),
+      _playoutBuffer(NULL),
+      _recordingFramesLeft(0),
+      _playoutFramesLeft(0),
+      _recordingBufferSizeIn10MS(0),
+      _recordingFramesIn10MS(0),
+      _playoutFramesIn10MS(0),
+      _playing(false),
+      _recording(false),
+      _lastCallPlayoutMillis(0),
+      _lastCallRecordMillis(0),
+      _outputFile(*FileWrapper::Create()),
+      _inputFile(*FileWrapper::Create()),
+      _outputFilename(outputFilename),
+      _inputFilename(inputFilename) {}
 
 FileAudioDevice::~FileAudioDevice() {
   delete &_outputFile;
@@ -59,9 +58,13 @@
   return InitStatus::OK;
 }
 
-int32_t FileAudioDevice::Terminate() { return 0; }
+int32_t FileAudioDevice::Terminate() {
+  return 0;
+}
 
-bool FileAudioDevice::Initialized() const { return true; }
+bool FileAudioDevice::Initialized() const {
+  return true;
+}
 
 int16_t FileAudioDevice::PlayoutDevices() {
   return 1;
@@ -72,8 +75,8 @@
 }
 
 int32_t FileAudioDevice::PlayoutDeviceName(uint16_t index,
-                                            char name[kAdmMaxDeviceNameSize],
-                                            char guid[kAdmMaxGuidSize]) {
+                                           char name[kAdmMaxDeviceNameSize],
+                                           char guid[kAdmMaxGuidSize]) {
   const char* kName = "dummy_device";
   const char* kGuid = "dummy_device_unique_id";
   if (index < 1) {
@@ -87,8 +90,8 @@
 }
 
 int32_t FileAudioDevice::RecordingDeviceName(uint16_t index,
-                                              char name[kAdmMaxDeviceNameSize],
-                                              char guid[kAdmMaxGuidSize]) {
+                                             char name[kAdmMaxDeviceNameSize],
+                                             char guid[kAdmMaxGuidSize]) {
   const char* kName = "dummy_device";
   const char* kGuid = "dummy_device_unique_id";
   if (index < 1) {
@@ -138,9 +141,9 @@
 
 int32_t FileAudioDevice::InitPlayout() {
   if (_ptrAudioBuffer) {
-      // Update webrtc audio buffer with the selected parameters
-      _ptrAudioBuffer->SetPlayoutSampleRate(kPlayoutFixedSampleRate);
-      _ptrAudioBuffer->SetPlayoutChannels(kPlayoutNumChannels);
+    // Update webrtc audio buffer with the selected parameters
+    _ptrAudioBuffer->SetPlayoutSampleRate(kPlayoutFixedSampleRate);
+    _ptrAudioBuffer->SetPlayoutChannels(kPlayoutNumChannels);
   }
   return 0;
 }
@@ -180,7 +183,7 @@
 
 int32_t FileAudioDevice::StartPlayout() {
   if (_playing) {
-      return 0;
+    return 0;
   }
 
   _playoutFramesIn10MS = static_cast<size_t>(kPlayoutFixedSampleRate / 100);
@@ -188,7 +191,7 @@
   _playoutFramesLeft = 0;
 
   if (!_playoutBuffer) {
-      _playoutBuffer = new int8_t[kPlayoutBufferSize];
+    _playoutBuffer = new int8_t[kPlayoutBufferSize];
   }
   if (!_playoutBuffer) {
     _playing = false;
@@ -200,7 +203,7 @@
       !_outputFile.OpenFile(_outputFilename.c_str(), false)) {
     LOG(LS_ERROR) << "Failed to open playout file: " << _outputFilename;
     _playing = false;
-    delete [] _playoutBuffer;
+    delete[] _playoutBuffer;
     _playoutBuffer = NULL;
     return -1;
   }
@@ -210,32 +213,30 @@
   _ptrThreadPlay->Start();
   _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);
 
-  LOG(LS_INFO) << "Started playout capture to output file: "
-               << _outputFilename;
+  LOG(LS_INFO) << "Started playout capture to output file: " << _outputFilename;
   return 0;
 }
 
 int32_t FileAudioDevice::StopPlayout() {
   {
-      rtc::CritScope lock(&_critSect);
-      _playing = false;
+    rtc::CritScope lock(&_critSect);
+    _playing = false;
   }
 
   // stop playout thread first
   if (_ptrThreadPlay) {
-      _ptrThreadPlay->Stop();
-      _ptrThreadPlay.reset();
+    _ptrThreadPlay->Stop();
+    _ptrThreadPlay.reset();
   }
 
   rtc::CritScope lock(&_critSect);
 
   _playoutFramesLeft = 0;
-  delete [] _playoutBuffer;
+  delete[] _playoutBuffer;
   _playoutBuffer = NULL;
   _outputFile.CloseFile();
 
-  LOG(LS_INFO) << "Stopped playout capture to output file: "
-               << _outputFilename;
+  LOG(LS_INFO) << "Stopped playout capture to output file: " << _outputFilename;
   return 0;
 }
 
@@ -247,11 +248,10 @@
   _recording = true;
 
   // Make sure we only create the buffer once.
-  _recordingBufferSizeIn10MS = _recordingFramesIn10MS *
-                               kRecordingNumChannels *
-                               2;
+  _recordingBufferSizeIn10MS =
+      _recordingFramesIn10MS * kRecordingNumChannels * 2;
   if (!_recordingBuffer) {
-      _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
+    _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
   }
 
   if (!_inputFilename.empty() &&
@@ -269,13 +269,11 @@
   _ptrThreadRec->Start();
   _ptrThreadRec->SetPriority(rtc::kRealtimePriority);
 
-  LOG(LS_INFO) << "Started recording from input file: "
-               << _inputFilename;
+  LOG(LS_INFO) << "Started recording from input file: " << _inputFilename;
 
   return 0;
 }
 
-
 int32_t FileAudioDevice::StopRecording() {
   {
     rtc::CritScope lock(&_critSect);
@@ -283,20 +281,19 @@
   }
 
   if (_ptrThreadRec) {
-      _ptrThreadRec->Stop();
-      _ptrThreadRec.reset();
+    _ptrThreadRec->Stop();
+    _ptrThreadRec.reset();
   }
 
   rtc::CritScope lock(&_critSect);
   _recordingFramesLeft = 0;
   if (_recordingBuffer) {
-      delete [] _recordingBuffer;
-      _recordingBuffer = NULL;
+    delete[] _recordingBuffer;
+    _recordingBuffer = NULL;
   }
   _inputFile.CloseFile();
 
-  LOG(LS_INFO) << "Stopped recording from input file: "
-               << _inputFilename;
+  LOG(LS_INFO) << "Stopped recording from input file: " << _inputFilename;
   return 0;
 }
 
@@ -304,25 +301,41 @@
   return _recording;
 }
 
-int32_t FileAudioDevice::SetAGC(bool enable) { return -1; }
+int32_t FileAudioDevice::SetAGC(bool enable) {
+  return -1;
+}
 
-bool FileAudioDevice::AGC() const { return false; }
+bool FileAudioDevice::AGC() const {
+  return false;
+}
 
-int32_t FileAudioDevice::InitSpeaker() { return -1; }
+int32_t FileAudioDevice::InitSpeaker() {
+  return -1;
+}
 
-bool FileAudioDevice::SpeakerIsInitialized() const { return false; }
+bool FileAudioDevice::SpeakerIsInitialized() const {
+  return false;
+}
 
-int32_t FileAudioDevice::InitMicrophone() { return 0; }
+int32_t FileAudioDevice::InitMicrophone() {
+  return 0;
+}
 
-bool FileAudioDevice::MicrophoneIsInitialized() const { return true; }
+bool FileAudioDevice::MicrophoneIsInitialized() const {
+  return true;
+}
 
 int32_t FileAudioDevice::SpeakerVolumeIsAvailable(bool& available) {
   return -1;
 }
 
-int32_t FileAudioDevice::SetSpeakerVolume(uint32_t volume) { return -1; }
+int32_t FileAudioDevice::SetSpeakerVolume(uint32_t volume) {
+  return -1;
+}
 
-int32_t FileAudioDevice::SpeakerVolume(uint32_t& volume) const { return -1; }
+int32_t FileAudioDevice::SpeakerVolume(uint32_t& volume) const {
+  return -1;
+}
 
 int32_t FileAudioDevice::MaxSpeakerVolume(uint32_t& maxVolume) const {
   return -1;
@@ -336,7 +349,9 @@
   return -1;
 }
 
-int32_t FileAudioDevice::SetMicrophoneVolume(uint32_t volume) { return -1; }
+int32_t FileAudioDevice::SetMicrophoneVolume(uint32_t volume) {
+  return -1;
+}
 
 int32_t FileAudioDevice::MicrophoneVolume(uint32_t& volume) const {
   return -1;
@@ -350,19 +365,29 @@
   return -1;
 }
 
-int32_t FileAudioDevice::SpeakerMuteIsAvailable(bool& available) { return -1; }
+int32_t FileAudioDevice::SpeakerMuteIsAvailable(bool& available) {
+  return -1;
+}
 
-int32_t FileAudioDevice::SetSpeakerMute(bool enable) { return -1; }
+int32_t FileAudioDevice::SetSpeakerMute(bool enable) {
+  return -1;
+}
 
-int32_t FileAudioDevice::SpeakerMute(bool& enabled) const { return -1; }
+int32_t FileAudioDevice::SpeakerMute(bool& enabled) const {
+  return -1;
+}
 
 int32_t FileAudioDevice::MicrophoneMuteIsAvailable(bool& available) {
   return -1;
 }
 
-int32_t FileAudioDevice::SetMicrophoneMute(bool enable) { return -1; }
+int32_t FileAudioDevice::SetMicrophoneMute(bool enable) {
+  return -1;
+}
 
-int32_t FileAudioDevice::MicrophoneMute(bool& enabled) const { return -1; }
+int32_t FileAudioDevice::MicrophoneMute(bool& enabled) const {
+  return -1;
+}
 
 int32_t FileAudioDevice::StereoPlayoutIsAvailable(bool& available) {
   available = true;
@@ -409,81 +434,76 @@
   _ptrAudioBuffer->SetPlayoutChannels(0);
 }
 
-bool FileAudioDevice::PlayThreadFunc(void* pThis)
-{
-    return (static_cast<FileAudioDevice*>(pThis)->PlayThreadProcess());
+bool FileAudioDevice::PlayThreadFunc(void* pThis) {
+  return (static_cast<FileAudioDevice*>(pThis)->PlayThreadProcess());
 }
 
-bool FileAudioDevice::RecThreadFunc(void* pThis)
-{
-    return (static_cast<FileAudioDevice*>(pThis)->RecThreadProcess());
+bool FileAudioDevice::RecThreadFunc(void* pThis) {
+  return (static_cast<FileAudioDevice*>(pThis)->RecThreadProcess());
 }
 
-bool FileAudioDevice::PlayThreadProcess()
-{
-    if (!_playing) {
-        return false;
-    }
-    int64_t currentTime = rtc::TimeMillis();
-    _critSect.Enter();
+bool FileAudioDevice::PlayThreadProcess() {
+  if (!_playing) {
+    return false;
+  }
+  int64_t currentTime = rtc::TimeMillis();
+  _critSect.Enter();
 
-    if (_lastCallPlayoutMillis == 0 ||
-        currentTime - _lastCallPlayoutMillis >= 10) {
-        _critSect.Leave();
-        _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
-        _critSect.Enter();
-
-        _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
-        RTC_DCHECK_EQ(_playoutFramesIn10MS, _playoutFramesLeft);
-        if (_outputFile.is_open()) {
-          _outputFile.Write(_playoutBuffer, kPlayoutBufferSize);
-        }
-        _lastCallPlayoutMillis = currentTime;
-    }
-    _playoutFramesLeft = 0;
+  if (_lastCallPlayoutMillis == 0 ||
+      currentTime - _lastCallPlayoutMillis >= 10) {
     _critSect.Leave();
-
-    int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
-    if (deltaTimeMillis < 10) {
-      SleepMs(10 - deltaTimeMillis);
-    }
-
-    return true;
-}
-
-bool FileAudioDevice::RecThreadProcess()
-{
-    if (!_recording) {
-        return false;
-    }
-
-    int64_t currentTime = rtc::TimeMillis();
+    _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
     _critSect.Enter();
 
-    if (_lastCallRecordMillis == 0 ||
-        currentTime - _lastCallRecordMillis >= 10) {
-      if (_inputFile.is_open()) {
-        if (_inputFile.Read(_recordingBuffer, kRecordingBufferSize) > 0) {
-          _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
-                                             _recordingFramesIn10MS);
-        } else {
-          _inputFile.Rewind();
-        }
-        _lastCallRecordMillis = currentTime;
-        _critSect.Leave();
-        _ptrAudioBuffer->DeliverRecordedData();
-        _critSect.Enter();
+    _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
+    RTC_DCHECK_EQ(_playoutFramesIn10MS, _playoutFramesLeft);
+    if (_outputFile.is_open()) {
+      _outputFile.Write(_playoutBuffer, kPlayoutBufferSize);
+    }
+    _lastCallPlayoutMillis = currentTime;
+  }
+  _playoutFramesLeft = 0;
+  _critSect.Leave();
+
+  int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
+  if (deltaTimeMillis < 10) {
+    SleepMs(10 - deltaTimeMillis);
+  }
+
+  return true;
+}
+
+bool FileAudioDevice::RecThreadProcess() {
+  if (!_recording) {
+    return false;
+  }
+
+  int64_t currentTime = rtc::TimeMillis();
+  _critSect.Enter();
+
+  if (_lastCallRecordMillis == 0 || currentTime - _lastCallRecordMillis >= 10) {
+    if (_inputFile.is_open()) {
+      if (_inputFile.Read(_recordingBuffer, kRecordingBufferSize) > 0) {
+        _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
+                                           _recordingFramesIn10MS);
+      } else {
+        _inputFile.Rewind();
       }
+      _lastCallRecordMillis = currentTime;
+      _critSect.Leave();
+      _ptrAudioBuffer->DeliverRecordedData();
+      _critSect.Enter();
     }
+  }
 
-    _critSect.Leave();
+  _critSect.Leave();
 
-    int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
-    if (deltaTimeMillis < 10) {
-      SleepMs(10 - deltaTimeMillis);
-    }
+  int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
+  if (deltaTimeMillis < 10) {
+    SleepMs(10 - deltaTimeMillis);
+  }
 
-    return true;
+  return true;
 }
 
 }  // namespace webrtc
diff --git a/modules/audio_device/dummy/file_audio_device_factory.cc b/modules/audio_device/dummy/file_audio_device_factory.cc
index 96e3eaf..1739953 100644
--- a/modules/audio_device/dummy/file_audio_device_factory.cc
+++ b/modules/audio_device/dummy/file_audio_device_factory.cc
@@ -36,7 +36,8 @@
 }
 
 void FileAudioDeviceFactory::SetFilenamesToUse(
-    const char* inputAudioFilename, const char* outputAudioFilename) {
+    const char* inputAudioFilename,
+    const char* outputAudioFilename) {
 #ifdef WEBRTC_DUMMY_FILE_DEVICES
   RTC_DCHECK_LT(strlen(inputAudioFilename), MAX_FILENAME_LEN);
   RTC_DCHECK_LT(strlen(outputAudioFilename), MAX_FILENAME_LEN);
@@ -47,8 +48,9 @@
   _isConfigured = true;
 #else
   // Sanity: must be compiled with the right define to run this.
-  printf("Trying to use dummy file devices, but is not compiled "
-         "with WEBRTC_DUMMY_FILE_DEVICES. Bailing out.\n");
+  printf(
+      "Trying to use dummy file devices, but is not compiled "
+      "with WEBRTC_DUMMY_FILE_DEVICES. Bailing out.\n");
   std::exit(1);
 #endif
 }
diff --git a/modules/audio_device/ios/audio_device_ios.mm b/modules/audio_device/ios/audio_device_ios.mm
index 07d4660..d0a9be5 100644
--- a/modules/audio_device/ios/audio_device_ios.mm
+++ b/modules/audio_device/ios/audio_device_ios.mm
@@ -34,7 +34,6 @@
 #import "sdk/objc/Framework/Headers/WebRTC/RTCAudioSession.h"
 #import "sdk/objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h"
 
-
 namespace webrtc {
 
 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"
@@ -56,7 +55,6 @@
     }                                          \
   } while (0)
 
-
 // Hardcoded delay estimates based on real measurements.
 // TODO(henrika): these value is not used in combination with built-in AEC.
 // Can most likely be removed.
@@ -93,8 +91,8 @@
     LOG(LS_INFO) << " process ID: " << ios::GetProcessID();
     LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString();
     LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount();
-#if defined(__IPHONE_9_0) && defined(__IPHONE_OS_VERSION_MAX_ALLOWED) \
-    && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
+#if defined(__IPHONE_9_0) && defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && \
+    __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
     LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
 #endif
 #if TARGET_IPHONE_SIMULATOR
@@ -121,8 +119,7 @@
   LOGI() << "ctor" << ios::GetCurrentThreadDescription();
   io_thread_checker_.DetachFromThread();
   thread_ = rtc::Thread::Current();
-  audio_session_observer_ =
-      [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this];
+  audio_session_observer_ = [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this];
 }
 
 AudioDeviceIOS::~AudioDeviceIOS() {
@@ -152,12 +149,9 @@
   // here. They have not been set and confirmed yet since configureForWebRTC
   // is not called until audio is about to start. However, it makes sense to
   // store the parameters now and then verify at a later stage.
-  RTCAudioSessionConfiguration* config =
-      [RTCAudioSessionConfiguration webRTCConfiguration];
-  playout_parameters_.reset(config.sampleRate,
-                            config.outputNumberOfChannels);
-  record_parameters_.reset(config.sampleRate,
-                           config.inputNumberOfChannels);
+  RTCAudioSessionConfiguration* config = [RTCAudioSessionConfiguration webRTCConfiguration];
+  playout_parameters_.reset(config.sampleRate, config.outputNumberOfChannels);
+  record_parameters_.reset(config.sampleRate, config.inputNumberOfChannels);
   // Ensure that the audio device buffer (ADB) knows about the internal audio
   // parameters. Note that, even if we are unable to get a mono audio session,
   // we will always tell the I/O audio unit to do a channel format conversion
@@ -235,8 +229,7 @@
   if (fine_audio_buffer_) {
     fine_audio_buffer_->ResetPlayout();
   }
-  if (!recording_ &&
-      audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+  if (!recording_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
     if (!audio_unit_->Start()) {
       RTCLogError(@"StartPlayout failed to start audio unit.");
       return -1;
@@ -269,9 +262,8 @@
     average_number_of_playout_callbacks_between_glitches =
         num_playout_callbacks_ / num_detected_playout_glitches_;
   }
-  RTC_HISTOGRAM_COUNTS_100000(
-      "WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches",
-      average_number_of_playout_callbacks_between_glitches);
+  RTC_HISTOGRAM_COUNTS_100000("WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches",
+                              average_number_of_playout_callbacks_between_glitches);
   RTCLog(@"Average number of playout callbacks between glitches: %d",
          average_number_of_playout_callbacks_between_glitches);
   return 0;
@@ -286,8 +278,7 @@
   if (fine_audio_buffer_) {
     fine_audio_buffer_->ResetRecord();
   }
-  if (!playing_ &&
-      audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+  if (!playing_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
     if (!audio_unit_->Start()) {
       RTCLogError(@"StartRecording failed to start audio unit.");
       return -1;
@@ -333,9 +324,8 @@
     options = AVAudioSessionCategoryOptionDefaultToSpeaker;
   }
   NSError* error = nil;
-  BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
-                          withOptions:options
-                                error:&error];
+  BOOL success =
+      [session setCategory:AVAudioSessionCategoryPlayAndRecord withOptions:options error:&error];
   ios::CheckAndLogError(success, error);
   [session unlockForConfiguration];
   return (error == nil) ? 0 : -1;
@@ -389,7 +379,9 @@
 
 void AudioDeviceIOS::OnCanPlayOrRecordChange(bool can_play_or_record) {
   RTC_DCHECK(thread_);
-  thread_->Post(RTC_FROM_HERE, this, kMessageTypeCanPlayOrRecordChange,
+  thread_->Post(RTC_FROM_HERE,
+                this,
+                kMessageTypeCanPlayOrRecordChange,
                 new rtc::TypedMessageData<bool>(can_play_or_record));
 }
 
@@ -406,11 +398,9 @@
   RTC_DCHECK_RUN_ON(&io_thread_checker_);
   OSStatus result = noErr;
   // Simply return if recording is not enabled.
-  if (!rtc::AtomicOps::AcquireLoad(&recording_))
-    return result;
+  if (!rtc::AtomicOps::AcquireLoad(&recording_)) return result;
 
-  const size_t num_bytes =
-      num_frames * VoiceProcessingAudioUnit::kBytesPerSample;
+  const size_t num_bytes = num_frames * VoiceProcessingAudioUnit::kBytesPerSample;
   // Set the size of our own audio buffer and clear it first to avoid copying
   // in combination with potential reallocations.
   // On real iOS devices, the size will only be set once (at first callback).
@@ -435,8 +425,7 @@
   // We can make the audio unit provide a buffer instead in io_data, but we
   // currently just use our own.
   // TODO(henrika): should error handling be improved?
-  result = audio_unit_->Render(
-      flags, time_stamp, bus_number, num_frames, &audio_buffer_list);
+  result = audio_unit_->Render(flags, time_stamp, bus_number, num_frames, &audio_buffer_list);
   if (result != noErr) {
     RTCLogError(@"Failed to render audio.");
     return result;
@@ -445,9 +434,8 @@
   // Get a pointer to the recorded audio and send it to the WebRTC ADB.
   // Use the FineAudioBuffer instance to convert between native buffer size
   // and the 10ms buffer size used by WebRTC.
-  fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_,
-                                          kFixedPlayoutDelayEstimate,
-                                          kFixedRecordDelayEstimate);
+  fine_audio_buffer_->DeliverRecordedData(
+      record_audio_buffer_, kFixedPlayoutDelayEstimate, kFixedRecordDelayEstimate);
   return noErr;
 }
 
@@ -465,8 +453,7 @@
   // Get pointer to internal audio buffer to which new audio data shall be
   // written.
   const size_t size_in_bytes = audio_buffer->mDataByteSize;
-  RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample,
-               num_frames);
+  RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames);
   int8_t* destination = reinterpret_cast<int8_t*>(audio_buffer->mData);
   // Produce silence and give audio unit a hint about it if playout is not
   // activated.
@@ -508,12 +495,11 @@
   // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
   // the native I/O audio unit) and copy the result to the audio buffer in the
   // |io_data| destination.
-  fine_audio_buffer_->GetPlayoutData(
-      rtc::ArrayView<int8_t>(destination, size_in_bytes));
+  fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<int8_t>(destination, size_in_bytes));
   return noErr;
 }
 
-void AudioDeviceIOS::OnMessage(rtc::Message *msg) {
+void AudioDeviceIOS::OnMessage(rtc::Message* msg) {
   switch (msg->message_id) {
     case kMessageTypeInterruptionBegin:
       HandleInterruptionBegin();
@@ -525,8 +511,7 @@
       HandleValidRouteChange();
       break;
     case kMessageTypeCanPlayOrRecordChange: {
-      rtc::TypedMessageData<bool>* data =
-          static_cast<rtc::TypedMessageData<bool>*>(msg->pdata);
+      rtc::TypedMessageData<bool>* data = static_cast<rtc::TypedMessageData<bool>*>(msg->pdata);
       HandleCanPlayOrRecordChange(data->data());
       delete data;
       break;
@@ -542,10 +527,8 @@
 
 void AudioDeviceIOS::HandleInterruptionBegin() {
   RTC_DCHECK_RUN_ON(&thread_checker_);
-  RTCLog(@"Interruption begin. IsInterrupted changed from %d to 1.",
-         is_interrupted_);
-  if (audio_unit_ &&
-      audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
+  RTCLog(@"Interruption begin. IsInterrupted changed from %d to 1.", is_interrupted_);
+  if (audio_unit_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
     RTCLog(@"Stopping the audio unit due to interruption begin.");
     if (!audio_unit_->Stop()) {
       RTCLogError(@"Failed to stop the audio unit for interruption begin.");
@@ -566,7 +549,8 @@
 void AudioDeviceIOS::HandleInterruptionEnd() {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   RTCLog(@"Interruption ended. IsInterrupted changed from %d to 0. "
-         "Updating audio unit state.", is_interrupted_);
+          "Updating audio unit state.",
+         is_interrupted_);
   is_interrupted_ = false;
   UpdateAudioUnit([RTCAudioSession sharedInstance].canPlayOrRecord);
 }
@@ -589,15 +573,13 @@
 
   // Don't do anything if we're interrupted.
   if (is_interrupted_) {
-    RTCLog(@"Ignoring sample rate change to %f due to interruption.",
-           sample_rate);
+    RTCLog(@"Ignoring sample rate change to %f due to interruption.", sample_rate);
     return;
   }
 
   // If we don't have an audio unit yet, or the audio unit is uninitialized,
   // there is no work to do.
-  if (!audio_unit_ ||
-      audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
+  if (!audio_unit_ || audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
     return;
   }
 
@@ -609,8 +591,7 @@
   const size_t session_frames_per_buffer =
       static_cast<size_t>(session_sample_rate * session_buffer_duration + .5);
   const double current_sample_rate = playout_parameters_.sample_rate();
-  const size_t current_frames_per_buffer =
-      playout_parameters_.frames_per_buffer();
+  const size_t current_frames_per_buffer = playout_parameters_.frames_per_buffer();
   RTCLog(@"Handling playout sample rate change to: %f\n"
           "  Session sample rate: %f frames_per_buffer: %lu\n"
           "  ADM sample rate: %f frames_per_buffer: %lu",
@@ -652,15 +633,13 @@
   // Initialize the audio unit again with the new sample rate.
   RTC_DCHECK_EQ(playout_parameters_.sample_rate(), session_sample_rate);
   if (!audio_unit_->Initialize(session_sample_rate)) {
-    RTCLogError(@"Failed to initialize the audio unit with sample rate: %f",
-                session_sample_rate);
+    RTCLogError(@"Failed to initialize the audio unit with sample rate: %f", session_sample_rate);
     return;
   }
 
   // Restart the audio unit if it was already running.
   if (restart_audio_unit && !audio_unit_->Start()) {
-    RTCLogError(@"Failed to start audio unit with sample rate: %f",
-                session_sample_rate);
+    RTCLogError(@"Failed to start audio unit with sample rate: %f", session_sample_rate);
     return;
   }
   RTCLog(@"Successfully handled sample rate change.");
@@ -682,8 +661,7 @@
     return;
   }
   num_detected_playout_glitches_++;
-  RTCLog(@"Number of detected playout glitches: %lld",
-         num_detected_playout_glitches_);
+  RTCLog(@"Number of detected playout glitches: %lld", num_detected_playout_glitches_);
 
   int64_t glitch_count = num_detected_playout_glitches_;
   dispatch_async(dispatch_get_main_queue(), ^{
@@ -712,8 +690,7 @@
   // Inform the audio device buffer (ADB) about the new audio format.
   audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
   audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
-  audio_device_buffer_->SetRecordingSampleRate(
-      record_parameters_.sample_rate());
+  audio_device_buffer_->SetRecordingSampleRate(record_parameters_.sample_rate());
   audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
 }
 
@@ -729,8 +706,7 @@
   // hardware sample rate but continue and use the non-ideal sample rate after
   // reinitializing the audio parameters. Most BT headsets only support 8kHz or
   // 16kHz.
-  RTCAudioSessionConfiguration* webRTCConfig =
-      [RTCAudioSessionConfiguration webRTCConfiguration];
+  RTCAudioSessionConfiguration* webRTCConfig = [RTCAudioSessionConfiguration webRTCConfiguration];
   if (sample_rate != webRTCConfig.sampleRate) {
     LOG(LS_WARNING) << "Unable to set the preferred sample rate";
   }
@@ -740,18 +716,13 @@
   // number of audio frames.
   // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
   // Hence, 128 is the size we expect to see in upcoming render callbacks.
-  playout_parameters_.reset(sample_rate, playout_parameters_.channels(),
-                            io_buffer_duration);
+  playout_parameters_.reset(sample_rate, playout_parameters_.channels(), io_buffer_duration);
   RTC_DCHECK(playout_parameters_.is_complete());
-  record_parameters_.reset(sample_rate, record_parameters_.channels(),
-                           io_buffer_duration);
+  record_parameters_.reset(sample_rate, record_parameters_.channels(), io_buffer_duration);
   RTC_DCHECK(record_parameters_.is_complete());
-  LOG(LS_INFO) << " frames per I/O buffer: "
-               << playout_parameters_.frames_per_buffer();
-  LOG(LS_INFO) << " bytes per I/O buffer: "
-               << playout_parameters_.GetBytesPerBuffer();
-  RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(),
-                record_parameters_.GetBytesPerBuffer());
+  LOG(LS_INFO) << " frames per I/O buffer: " << playout_parameters_.frames_per_buffer();
+  LOG(LS_INFO) << " bytes per I/O buffer: " << playout_parameters_.GetBytesPerBuffer();
+  RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(), record_parameters_.GetBytesPerBuffer());
 
   // Update the ADB parameters since the sample rate might have changed.
   UpdateAudioDeviceBuffer();
@@ -781,7 +752,8 @@
 void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   RTCLog(@"Updating audio unit state. CanPlayOrRecord=%d IsInterrupted=%d",
-         can_play_or_record, is_interrupted_);
+         can_play_or_record,
+         is_interrupted_);
 
   if (is_interrupted_) {
     RTCLog(@"Ignoring audio unit update due to interruption.");
@@ -790,8 +762,7 @@
 
   // If we're not initialized we don't need to do anything. Audio unit will
   // be initialized on initialization.
-  if (!audio_is_initialized_)
-    return;
+  if (!audio_is_initialized_) return;
 
   // If we're initialized, we must have an audio unit.
   RTC_DCHECK(audio_unit_);
@@ -809,13 +780,11 @@
     case VoiceProcessingAudioUnit::kUninitialized:
       RTCLog(@"VPAU state: Uninitialized");
       should_initialize_audio_unit = can_play_or_record;
-      should_start_audio_unit = should_initialize_audio_unit &&
-          (playing_ || recording_);
+      should_start_audio_unit = should_initialize_audio_unit && (playing_ || recording_);
       break;
     case VoiceProcessingAudioUnit::kInitialized:
       RTCLog(@"VPAU state: Initialized");
-      should_start_audio_unit =
-          can_play_or_record && (playing_ || recording_);
+      should_start_audio_unit = can_play_or_record && (playing_ || recording_);
       should_uninitialize_audio_unit = !can_play_or_record;
       break;
     case VoiceProcessingAudioUnit::kStarted:
@@ -916,8 +885,7 @@
   NSError* error = nil;
   if (![session beginWebRTCSession:&error]) {
     [session unlockForConfiguration];
-    RTCLogError(@"Failed to begin WebRTC session: %@",
-                error.localizedDescription);
+    RTCLogError(@"Failed to begin WebRTC session: %@", error.localizedDescription);
     return false;
   }
 
diff --git a/modules/audio_device/ios/audio_device_not_implemented_ios.mm b/modules/audio_device/ios/audio_device_not_implemented_ios.mm
index 6dfc02b..4de2653 100644
--- a/modules/audio_device/ios/audio_device_not_implemented_ios.mm
+++ b/modules/audio_device/ios/audio_device_not_implemented_ios.mm
@@ -15,8 +15,7 @@
 
 namespace webrtc {
 
-int32_t AudioDeviceIOS::ActiveAudioLayer(
-    AudioDeviceModule::AudioLayer& audioLayer) const {
+int32_t AudioDeviceIOS::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const {
   audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
   return 0;
 }
@@ -199,8 +198,7 @@
   return 0;
 }
 
-int32_t AudioDeviceIOS::SetRecordingDevice(
-    AudioDeviceModule::WindowsDeviceType) {
+int32_t AudioDeviceIOS::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType) {
   RTC_NOTREACHED() << "Not implemented";
   return -1;
 }
diff --git a/modules/audio_device/linux/audio_device_alsa_linux.cc b/modules/audio_device/linux/audio_device_alsa_linux.cc
index 0a98efd..0cc484f 100644
--- a/modules/audio_device/linux/audio_device_alsa_linux.cc
+++ b/modules/audio_device/linux/audio_device_alsa_linux.cc
@@ -26,119 +26,115 @@
 
 // Redefine these here to be able to do late-binding
 #undef snd_ctl_card_info_alloca
-#define snd_ctl_card_info_alloca(ptr) \
-        do { *ptr = (snd_ctl_card_info_t *) \
-            __builtin_alloca (LATE(snd_ctl_card_info_sizeof)()); \
-            memset(*ptr, 0, LATE(snd_ctl_card_info_sizeof)()); } while (0)
+#define snd_ctl_card_info_alloca(ptr)                  \
+  do {                                                 \
+    *ptr = (snd_ctl_card_info_t*)__builtin_alloca(     \
+        LATE(snd_ctl_card_info_sizeof)());             \
+    memset(*ptr, 0, LATE(snd_ctl_card_info_sizeof)()); \
+  } while (0)
 
 #undef snd_pcm_info_alloca
-#define snd_pcm_info_alloca(pInfo) \
-       do { *pInfo = (snd_pcm_info_t *) \
-       __builtin_alloca (LATE(snd_pcm_info_sizeof)()); \
-       memset(*pInfo, 0, LATE(snd_pcm_info_sizeof)()); } while (0)
+#define snd_pcm_info_alloca(pInfo)                                           \
+  do {                                                                       \
+    *pInfo = (snd_pcm_info_t*)__builtin_alloca(LATE(snd_pcm_info_sizeof)()); \
+    memset(*pInfo, 0, LATE(snd_pcm_info_sizeof)());                          \
+  } while (0)
 
 // snd_lib_error_handler_t
-void WebrtcAlsaErrorHandler(const char *file,
-                          int line,
-                          const char *function,
-                          int err,
-                          const char *fmt,...){};
+void WebrtcAlsaErrorHandler(const char* file,
+                            int line,
+                            const char* function,
+                            int err,
+                            const char* fmt,
+                            ...){};
 
-namespace webrtc
-{
+namespace webrtc {
 static const unsigned int ALSA_PLAYOUT_FREQ = 48000;
 static const unsigned int ALSA_PLAYOUT_CH = 2;
-static const unsigned int ALSA_PLAYOUT_LATENCY = 40*1000; // in us
+static const unsigned int ALSA_PLAYOUT_LATENCY = 40 * 1000;  // in us
 static const unsigned int ALSA_CAPTURE_FREQ = 48000;
 static const unsigned int ALSA_CAPTURE_CH = 2;
-static const unsigned int ALSA_CAPTURE_LATENCY = 40*1000; // in us
-static const unsigned int ALSA_CAPTURE_WAIT_TIMEOUT = 5; // in ms
+static const unsigned int ALSA_CAPTURE_LATENCY = 40 * 1000;  // in us
+static const unsigned int ALSA_CAPTURE_WAIT_TIMEOUT = 5;     // in ms
 
 #define FUNC_GET_NUM_OF_DEVICE 0
 #define FUNC_GET_DEVICE_NAME 1
 #define FUNC_GET_DEVICE_NAME_FOR_AN_ENUM 2
 
-AudioDeviceLinuxALSA::AudioDeviceLinuxALSA() :
-    _ptrAudioBuffer(NULL),
-    _inputDeviceIndex(0),
-    _outputDeviceIndex(0),
-    _inputDeviceIsSpecified(false),
-    _outputDeviceIsSpecified(false),
-    _handleRecord(NULL),
-    _handlePlayout(NULL),
-    _recordingBuffersizeInFrame(0),
-    _recordingPeriodSizeInFrame(0),
-    _playoutBufferSizeInFrame(0),
-    _playoutPeriodSizeInFrame(0),
-    _recordingBufferSizeIn10MS(0),
-    _playoutBufferSizeIn10MS(0),
-    _recordingFramesIn10MS(0),
-    _playoutFramesIn10MS(0),
-    _recordingFreq(ALSA_CAPTURE_FREQ),
-    _playoutFreq(ALSA_PLAYOUT_FREQ),
-    _recChannels(ALSA_CAPTURE_CH),
-    _playChannels(ALSA_PLAYOUT_CH),
-    _recordingBuffer(NULL),
-    _playoutBuffer(NULL),
-    _recordingFramesLeft(0),
-    _playoutFramesLeft(0),
-    _initialized(false),
-    _recording(false),
-    _playing(false),
-    _recIsInitialized(false),
-    _playIsInitialized(false),
-    _AGC(false),
-    _recordingDelay(0),
-    _playoutDelay(0)
-{
-    memset(_oldKeyState, 0, sizeof(_oldKeyState));
-    LOG(LS_INFO) << __FUNCTION__ << " created";
+AudioDeviceLinuxALSA::AudioDeviceLinuxALSA()
+    : _ptrAudioBuffer(NULL),
+      _inputDeviceIndex(0),
+      _outputDeviceIndex(0),
+      _inputDeviceIsSpecified(false),
+      _outputDeviceIsSpecified(false),
+      _handleRecord(NULL),
+      _handlePlayout(NULL),
+      _recordingBuffersizeInFrame(0),
+      _recordingPeriodSizeInFrame(0),
+      _playoutBufferSizeInFrame(0),
+      _playoutPeriodSizeInFrame(0),
+      _recordingBufferSizeIn10MS(0),
+      _playoutBufferSizeIn10MS(0),
+      _recordingFramesIn10MS(0),
+      _playoutFramesIn10MS(0),
+      _recordingFreq(ALSA_CAPTURE_FREQ),
+      _playoutFreq(ALSA_PLAYOUT_FREQ),
+      _recChannels(ALSA_CAPTURE_CH),
+      _playChannels(ALSA_PLAYOUT_CH),
+      _recordingBuffer(NULL),
+      _playoutBuffer(NULL),
+      _recordingFramesLeft(0),
+      _playoutFramesLeft(0),
+      _initialized(false),
+      _recording(false),
+      _playing(false),
+      _recIsInitialized(false),
+      _playIsInitialized(false),
+      _AGC(false),
+      _recordingDelay(0),
+      _playoutDelay(0) {
+  memset(_oldKeyState, 0, sizeof(_oldKeyState));
+  LOG(LS_INFO) << __FUNCTION__ << " created";
 }
 
 // ----------------------------------------------------------------------------
 //  AudioDeviceLinuxALSA - dtor
 // ----------------------------------------------------------------------------
 
-AudioDeviceLinuxALSA::~AudioDeviceLinuxALSA()
-{
-    LOG(LS_INFO) << __FUNCTION__ << " destroyed";
+AudioDeviceLinuxALSA::~AudioDeviceLinuxALSA() {
+  LOG(LS_INFO) << __FUNCTION__ << " destroyed";
 
-    Terminate();
+  Terminate();
 
-    // Clean up the recording buffer and playout buffer.
-    if (_recordingBuffer)
-    {
-        delete [] _recordingBuffer;
-        _recordingBuffer = NULL;
-    }
-    if (_playoutBuffer)
-    {
-        delete [] _playoutBuffer;
-        _playoutBuffer = NULL;
-    }
+  // Clean up the recording buffer and playout buffer.
+  if (_recordingBuffer) {
+    delete[] _recordingBuffer;
+    _recordingBuffer = NULL;
+  }
+  if (_playoutBuffer) {
+    delete[] _playoutBuffer;
+    _playoutBuffer = NULL;
+  }
 }
 
-void AudioDeviceLinuxALSA::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
-{
+void AudioDeviceLinuxALSA::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  _ptrAudioBuffer = audioBuffer;
 
-    _ptrAudioBuffer = audioBuffer;
-
-    // Inform the AudioBuffer about default settings for this implementation.
-    // Set all values to zero here since the actual settings will be done by
-    // InitPlayout and InitRecording later.
-    _ptrAudioBuffer->SetRecordingSampleRate(0);
-    _ptrAudioBuffer->SetPlayoutSampleRate(0);
-    _ptrAudioBuffer->SetRecordingChannels(0);
-    _ptrAudioBuffer->SetPlayoutChannels(0);
+  // Inform the AudioBuffer about default settings for this implementation.
+  // Set all values to zero here since the actual settings will be done by
+  // InitPlayout and InitRecording later.
+  _ptrAudioBuffer->SetRecordingSampleRate(0);
+  _ptrAudioBuffer->SetPlayoutSampleRate(0);
+  _ptrAudioBuffer->SetRecordingChannels(0);
+  _ptrAudioBuffer->SetPlayoutChannels(0);
 }
 
 int32_t AudioDeviceLinuxALSA::ActiveAudioLayer(
-    AudioDeviceModule::AudioLayer& audioLayer) const
-{
-    audioLayer = AudioDeviceModule::kLinuxAlsaAudio;
-    return 0;
+    AudioDeviceModule::AudioLayer& audioLayer) const {
+  audioLayer = AudioDeviceModule::kLinuxAlsaAudio;
+  return 0;
 }
 
 AudioDeviceGeneric::InitStatus AudioDeviceLinuxALSA::Init() {
@@ -155,654 +151,541 @@
     return InitStatus::OK;
   }
 #if defined(USE_X11)
-    //Get X display handle for typing detection
-    _XDisplay = XOpenDisplay(NULL);
-    if (!_XDisplay) {
-      LOG(LS_WARNING)
-          << "failed to open X display, typing detection will not work";
-    }
+  // Get X display handle for typing detection
+  _XDisplay = XOpenDisplay(NULL);
+  if (!_XDisplay) {
+    LOG(LS_WARNING)
+        << "failed to open X display, typing detection will not work";
+  }
 #endif
 
-    _initialized = true;
+  _initialized = true;
 
-    return InitStatus::OK;
+  return InitStatus::OK;
 }
 
-int32_t AudioDeviceLinuxALSA::Terminate()
-{
-    if (!_initialized)
-    {
-        return 0;
-    }
+int32_t AudioDeviceLinuxALSA::Terminate() {
+  if (!_initialized) {
+    return 0;
+  }
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    _mixerManager.Close();
+  _mixerManager.Close();
 
-    // RECORDING
-    if (_ptrThreadRec)
-    {
-        rtc::PlatformThread* tmpThread = _ptrThreadRec.release();
-        _critSect.Leave();
+  // RECORDING
+  if (_ptrThreadRec) {
+    rtc::PlatformThread* tmpThread = _ptrThreadRec.release();
+    _critSect.Leave();
 
-        tmpThread->Stop();
-        delete tmpThread;
+    tmpThread->Stop();
+    delete tmpThread;
 
-        _critSect.Enter();
-    }
+    _critSect.Enter();
+  }
 
-    // PLAYOUT
-    if (_ptrThreadPlay)
-    {
-        rtc::PlatformThread* tmpThread = _ptrThreadPlay.release();
-        _critSect.Leave();
+  // PLAYOUT
+  if (_ptrThreadPlay) {
+    rtc::PlatformThread* tmpThread = _ptrThreadPlay.release();
+    _critSect.Leave();
 
-        tmpThread->Stop();
-        delete tmpThread;
+    tmpThread->Stop();
+    delete tmpThread;
 
-        _critSect.Enter();
-    }
+    _critSect.Enter();
+  }
 #if defined(USE_X11)
-    if (_XDisplay)
-    {
-      XCloseDisplay(_XDisplay);
-      _XDisplay = NULL;
-    }
+  if (_XDisplay) {
+    XCloseDisplay(_XDisplay);
+    _XDisplay = NULL;
+  }
 #endif
-    _initialized = false;
-    _outputDeviceIsSpecified = false;
-    _inputDeviceIsSpecified = false;
+  _initialized = false;
+  _outputDeviceIsSpecified = false;
+  _inputDeviceIsSpecified = false;
 
+  return 0;
+}
+
+bool AudioDeviceLinuxALSA::Initialized() const {
+  return (_initialized);
+}
+
+int32_t AudioDeviceLinuxALSA::InitSpeaker() {
+  rtc::CritScope lock(&_critSect);
+
+  if (_playing) {
+    return -1;
+  }
+
+  char devName[kAdmMaxDeviceNameSize] = {0};
+  GetDevicesInfo(2, true, _outputDeviceIndex, devName, kAdmMaxDeviceNameSize);
+  return _mixerManager.OpenSpeaker(devName);
+}
+
+int32_t AudioDeviceLinuxALSA::InitMicrophone() {
+  rtc::CritScope lock(&_critSect);
+
+  if (_recording) {
+    return -1;
+  }
+
+  char devName[kAdmMaxDeviceNameSize] = {0};
+  GetDevicesInfo(2, false, _inputDeviceIndex, devName, kAdmMaxDeviceNameSize);
+  return _mixerManager.OpenMicrophone(devName);
+}
+
+bool AudioDeviceLinuxALSA::SpeakerIsInitialized() const {
+  return (_mixerManager.SpeakerIsInitialized());
+}
+
+bool AudioDeviceLinuxALSA::MicrophoneIsInitialized() const {
+  return (_mixerManager.MicrophoneIsInitialized());
+}
+
+int32_t AudioDeviceLinuxALSA::SpeakerVolumeIsAvailable(bool& available) {
+  bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+  // Make an attempt to open up the
+  // output mixer corresponding to the currently selected output device.
+  if (!wasInitialized && InitSpeaker() == -1) {
+    // If we end up here it means that the selected speaker has no volume
+    // control.
+    available = false;
     return 0;
+  }
+
+  // Given that InitSpeaker was successful, we know that a volume control
+  // exists
+  available = true;
+
+  // Close the initialized output mixer
+  if (!wasInitialized) {
+    _mixerManager.CloseSpeaker();
+  }
+
+  return 0;
 }
 
-bool AudioDeviceLinuxALSA::Initialized() const
-{
-    return (_initialized);
+int32_t AudioDeviceLinuxALSA::SetSpeakerVolume(uint32_t volume) {
+  return (_mixerManager.SetSpeakerVolume(volume));
 }
 
-int32_t AudioDeviceLinuxALSA::InitSpeaker()
-{
+int32_t AudioDeviceLinuxALSA::SpeakerVolume(uint32_t& volume) const {
+  uint32_t level(0);
 
-    rtc::CritScope lock(&_critSect);
+  if (_mixerManager.SpeakerVolume(level) == -1) {
+    return -1;
+  }
 
-    if (_playing)
-    {
-        return -1;
-    }
+  volume = level;
 
-    char devName[kAdmMaxDeviceNameSize] = {0};
-    GetDevicesInfo(2, true, _outputDeviceIndex, devName, kAdmMaxDeviceNameSize);
-    return _mixerManager.OpenSpeaker(devName);
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::InitMicrophone()
-{
+int32_t AudioDeviceLinuxALSA::MaxSpeakerVolume(uint32_t& maxVolume) const {
+  uint32_t maxVol(0);
 
-    rtc::CritScope lock(&_critSect);
+  if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
+    return -1;
+  }
 
-    if (_recording)
-    {
-        return -1;
-    }
+  maxVolume = maxVol;
 
-    char devName[kAdmMaxDeviceNameSize] = {0};
-    GetDevicesInfo(2, false, _inputDeviceIndex, devName, kAdmMaxDeviceNameSize);
-    return _mixerManager.OpenMicrophone(devName);
+  return 0;
 }
 
-bool AudioDeviceLinuxALSA::SpeakerIsInitialized() const
-{
-    return (_mixerManager.SpeakerIsInitialized());
+int32_t AudioDeviceLinuxALSA::MinSpeakerVolume(uint32_t& minVolume) const {
+  uint32_t minVol(0);
+
+  if (_mixerManager.MinSpeakerVolume(minVol) == -1) {
+    return -1;
+  }
+
+  minVolume = minVol;
+
+  return 0;
 }
 
-bool AudioDeviceLinuxALSA::MicrophoneIsInitialized() const
-{
-    return (_mixerManager.MicrophoneIsInitialized());
-}
+int32_t AudioDeviceLinuxALSA::SpeakerMuteIsAvailable(bool& available) {
+  bool isAvailable(false);
+  bool wasInitialized = _mixerManager.SpeakerIsInitialized();
 
-int32_t AudioDeviceLinuxALSA::SpeakerVolumeIsAvailable(bool& available)
-{
-
-    bool wasInitialized = _mixerManager.SpeakerIsInitialized();
-
-    // Make an attempt to open up the
-    // output mixer corresponding to the currently selected output device.
-    if (!wasInitialized && InitSpeaker() == -1)
-    {
-        // If we end up here it means that the selected speaker has no volume
-        // control.
-        available = false;
-        return 0;
-    }
-
-    // Given that InitSpeaker was successful, we know that a volume control
-    // exists
-    available = true;
-
-    // Close the initialized output mixer
-    if (!wasInitialized)
-    {
-        _mixerManager.CloseSpeaker();
-    }
-
+  // Make an attempt to open up the
+  // output mixer corresponding to the currently selected output device.
+  //
+  if (!wasInitialized && InitSpeaker() == -1) {
+    // If we end up here it means that the selected speaker has no volume
+    // control, hence it is safe to state that there is no mute control
+    // already at this stage.
+    available = false;
     return 0;
+  }
+
+  // Check if the selected speaker has a mute control
+  _mixerManager.SpeakerMuteIsAvailable(isAvailable);
+
+  available = isAvailable;
+
+  // Close the initialized output mixer
+  if (!wasInitialized) {
+    _mixerManager.CloseSpeaker();
+  }
+
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::SetSpeakerVolume(uint32_t volume)
-{
-
-    return (_mixerManager.SetSpeakerVolume(volume));
+int32_t AudioDeviceLinuxALSA::SetSpeakerMute(bool enable) {
+  return (_mixerManager.SetSpeakerMute(enable));
 }
 
-int32_t AudioDeviceLinuxALSA::SpeakerVolume(uint32_t& volume) const
-{
+int32_t AudioDeviceLinuxALSA::SpeakerMute(bool& enabled) const {
+  bool muted(0);
 
-    uint32_t level(0);
+  if (_mixerManager.SpeakerMute(muted) == -1) {
+    return -1;
+  }
 
-    if (_mixerManager.SpeakerVolume(level) == -1)
-    {
-        return -1;
-    }
+  enabled = muted;
 
-    volume = level;
+  return 0;
+}
 
+int32_t AudioDeviceLinuxALSA::MicrophoneMuteIsAvailable(bool& available) {
+  bool isAvailable(false);
+  bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+  // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+  //
+  if (!wasInitialized && InitMicrophone() == -1) {
+    // If we end up here it means that the selected microphone has no volume
+    // control, hence it is safe to state that there is no mute control
+    // already at this stage.
+    available = false;
     return 0;
+  }
+
+  // Check if the selected microphone has a mute control
+  //
+  _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+  available = isAvailable;
+
+  // Close the initialized input mixer
+  //
+  if (!wasInitialized) {
+    _mixerManager.CloseMicrophone();
+  }
+
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::MaxSpeakerVolume(
-    uint32_t& maxVolume) const
-{
-
-    uint32_t maxVol(0);
-
-    if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
-    {
-        return -1;
-    }
-
-    maxVolume = maxVol;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::MinSpeakerVolume(
-    uint32_t& minVolume) const
-{
-
-    uint32_t minVol(0);
-
-    if (_mixerManager.MinSpeakerVolume(minVol) == -1)
-    {
-        return -1;
-    }
-
-    minVolume = minVol;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::SpeakerMuteIsAvailable(bool& available)
-{
-
-    bool isAvailable(false);
-    bool wasInitialized = _mixerManager.SpeakerIsInitialized();
-
-    // Make an attempt to open up the
-    // output mixer corresponding to the currently selected output device.
-    //
-    if (!wasInitialized && InitSpeaker() == -1)
-    {
-        // If we end up here it means that the selected speaker has no volume
-        // control, hence it is safe to state that there is no mute control
-        // already at this stage.
-        available = false;
-        return 0;
-    }
-
-    // Check if the selected speaker has a mute control
-    _mixerManager.SpeakerMuteIsAvailable(isAvailable);
-
-    available = isAvailable;
-
-    // Close the initialized output mixer
-    if (!wasInitialized)
-    {
-        _mixerManager.CloseSpeaker();
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::SetSpeakerMute(bool enable)
-{
-    return (_mixerManager.SetSpeakerMute(enable));
-}
-
-int32_t AudioDeviceLinuxALSA::SpeakerMute(bool& enabled) const
-{
-
-    bool muted(0);
-
-    if (_mixerManager.SpeakerMute(muted) == -1)
-    {
-        return -1;
-    }
-
-    enabled = muted;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::MicrophoneMuteIsAvailable(bool& available)
-{
-
-    bool isAvailable(false);
-    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
-
-    // Make an attempt to open up the
-    // input mixer corresponding to the currently selected input device.
-    //
-    if (!wasInitialized && InitMicrophone() == -1)
-    {
-        // If we end up here it means that the selected microphone has no volume
-        // control, hence it is safe to state that there is no mute control
-        // already at this stage.
-        available = false;
-        return 0;
-    }
-
-    // Check if the selected microphone has a mute control
-    //
-    _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
-    available = isAvailable;
-
-    // Close the initialized input mixer
-    //
-    if (!wasInitialized)
-    {
-        _mixerManager.CloseMicrophone();
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::SetMicrophoneMute(bool enable)
-{
-    return (_mixerManager.SetMicrophoneMute(enable));
+int32_t AudioDeviceLinuxALSA::SetMicrophoneMute(bool enable) {
+  return (_mixerManager.SetMicrophoneMute(enable));
 }
 
 // ----------------------------------------------------------------------------
 //  MicrophoneMute
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceLinuxALSA::MicrophoneMute(bool& enabled) const
-{
+int32_t AudioDeviceLinuxALSA::MicrophoneMute(bool& enabled) const {
+  bool muted(0);
 
-    bool muted(0);
+  if (_mixerManager.MicrophoneMute(muted) == -1) {
+    return -1;
+  }
 
-    if (_mixerManager.MicrophoneMute(muted) == -1)
-    {
-        return -1;
-    }
-
-    enabled = muted;
-    return 0;
+  enabled = muted;
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::StereoRecordingIsAvailable(bool& available)
-{
+int32_t AudioDeviceLinuxALSA::StereoRecordingIsAvailable(bool& available) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
-
-    // If we already have initialized in stereo it's obviously available
-    if (_recIsInitialized && (2 == _recChannels))
-    {
-        available = true;
-        return 0;
-    }
-
-    // Save rec states and the number of rec channels
-    bool recIsInitialized = _recIsInitialized;
-    bool recording = _recording;
-    int recChannels = _recChannels;
-
-    available = false;
-
-    // Stop/uninitialize recording if initialized (and possibly started)
-    if (_recIsInitialized)
-    {
-        StopRecording();
-    }
-
-    // Try init in stereo;
-    _recChannels = 2;
-    if (InitRecording() == 0)
-    {
-        available = true;
-    }
-
-    // Stop/uninitialize recording
-    StopRecording();
-
-    // Recover previous states
-    _recChannels = recChannels;
-    if (recIsInitialized)
-    {
-        InitRecording();
-    }
-    if (recording)
-    {
-        StartRecording();
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::SetStereoRecording(bool enable)
-{
-
-    if (enable)
-        _recChannels = 2;
-    else
-        _recChannels = 1;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::StereoRecording(bool& enabled) const
-{
-
-    if (_recChannels == 2)
-        enabled = true;
-    else
-        enabled = false;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::StereoPlayoutIsAvailable(bool& available)
-{
-
-    rtc::CritScope lock(&_critSect);
-
-    // If we already have initialized in stereo it's obviously available
-    if (_playIsInitialized && (2 == _playChannels))
-    {
-        available = true;
-        return 0;
-    }
-
-    // Save rec states and the number of rec channels
-    bool playIsInitialized = _playIsInitialized;
-    bool playing = _playing;
-    int playChannels = _playChannels;
-
-    available = false;
-
-    // Stop/uninitialize recording if initialized (and possibly started)
-    if (_playIsInitialized)
-    {
-        StopPlayout();
-    }
-
-    // Try init in stereo;
-    _playChannels = 2;
-    if (InitPlayout() == 0)
-    {
-        available = true;
-    }
-
-    // Stop/uninitialize recording
-    StopPlayout();
-
-    // Recover previous states
-    _playChannels = playChannels;
-    if (playIsInitialized)
-    {
-        InitPlayout();
-    }
-    if (playing)
-    {
-        StartPlayout();
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::SetStereoPlayout(bool enable)
-{
-
-    if (enable)
-        _playChannels = 2;
-    else
-        _playChannels = 1;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::StereoPlayout(bool& enabled) const
-{
-
-    if (_playChannels == 2)
-        enabled = true;
-    else
-        enabled = false;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::SetAGC(bool enable)
-{
-
-    _AGC = enable;
-
-    return 0;
-}
-
-bool AudioDeviceLinuxALSA::AGC() const
-{
-
-    return _AGC;
-}
-
-int32_t AudioDeviceLinuxALSA::MicrophoneVolumeIsAvailable(bool& available)
-{
-
-    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
-
-    // Make an attempt to open up the
-    // input mixer corresponding to the currently selected output device.
-    if (!wasInitialized && InitMicrophone() == -1)
-    {
-        // If we end up here it means that the selected microphone has no volume
-        // control.
-        available = false;
-        return 0;
-    }
-
-    // Given that InitMicrophone was successful, we know that a volume control
-    // exists
+  // If we have already initialized in stereo, it's obviously available
+  if (_recIsInitialized && (2 == _recChannels)) {
     available = true;
-
-    // Close the initialized input mixer
-    if (!wasInitialized)
-    {
-        _mixerManager.CloseMicrophone();
-    }
-
     return 0;
+  }
+
+  // Save rec states and the number of rec channels
+  bool recIsInitialized = _recIsInitialized;
+  bool recording = _recording;
+  int recChannels = _recChannels;
+
+  available = false;
+
+  // Stop/uninitialize recording if initialized (and possibly started)
+  if (_recIsInitialized) {
+    StopRecording();
+  }
+
+  // Try to initialize in stereo.
+  _recChannels = 2;
+  if (InitRecording() == 0) {
+    available = true;
+  }
+
+  // Stop/uninitialize recording
+  StopRecording();
+
+  // Recover previous states
+  _recChannels = recChannels;
+  if (recIsInitialized) {
+    InitRecording();
+  }
+  if (recording) {
+    StartRecording();
+  }
+
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::SetMicrophoneVolume(uint32_t volume)
-{
+int32_t AudioDeviceLinuxALSA::SetStereoRecording(bool enable) {
+  if (enable)
+    _recChannels = 2;
+  else
+    _recChannels = 1;
 
-    return (_mixerManager.SetMicrophoneVolume(volume));
-
-    return 0;
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::MicrophoneVolume(uint32_t& volume) const
-{
+int32_t AudioDeviceLinuxALSA::StereoRecording(bool& enabled) const {
+  if (_recChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
 
-    uint32_t level(0);
-
-    if (_mixerManager.MicrophoneVolume(level) == -1)
-    {
-        LOG(LS_WARNING) << "failed to retrive current microphone level";
-        return -1;
-    }
-
-    volume = level;
-
-    return 0;
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::MaxMicrophoneVolume(
-    uint32_t& maxVolume) const
-{
+int32_t AudioDeviceLinuxALSA::StereoPlayoutIsAvailable(bool& available) {
+  rtc::CritScope lock(&_critSect);
 
-    uint32_t maxVol(0);
-
-    if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
-    {
-        return -1;
-    }
-
-    maxVolume = maxVol;
-
+  // If we have already initialized in stereo, it's obviously available
+  if (_playIsInitialized && (2 == _playChannels)) {
+    available = true;
     return 0;
+  }
+
+  // Save playout states and the number of playout channels
+  bool playIsInitialized = _playIsInitialized;
+  bool playing = _playing;
+  int playChannels = _playChannels;
+
+  available = false;
+
+  // Stop/uninitialize playout if initialized (and possibly started)
+  if (_playIsInitialized) {
+    StopPlayout();
+  }
+
+  // Try to initialize in stereo.
+  _playChannels = 2;
+  if (InitPlayout() == 0) {
+    available = true;
+  }
+
+  // Stop/uninitialize playout
+  StopPlayout();
+
+  // Recover previous states
+  _playChannels = playChannels;
+  if (playIsInitialized) {
+    InitPlayout();
+  }
+  if (playing) {
+    StartPlayout();
+  }
+
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::MinMicrophoneVolume(
-    uint32_t& minVolume) const
-{
+int32_t AudioDeviceLinuxALSA::SetStereoPlayout(bool enable) {
+  if (enable)
+    _playChannels = 2;
+  else
+    _playChannels = 1;
 
-    uint32_t minVol(0);
-
-    if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
-    {
-        return -1;
-    }
-
-    minVolume = minVol;
-
-    return 0;
+  return 0;
 }
 
-int16_t AudioDeviceLinuxALSA::PlayoutDevices()
-{
+int32_t AudioDeviceLinuxALSA::StereoPlayout(bool& enabled) const {
+  if (_playChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
 
-    return (int16_t)GetDevicesInfo(0, true);
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(uint16_t index)
-{
+int32_t AudioDeviceLinuxALSA::SetAGC(bool enable) {
+  _AGC = enable;
 
-    if (_playIsInitialized)
-    {
-        return -1;
-    }
+  return 0;
+}
 
-    uint32_t nDevices = GetDevicesInfo(0, true);
-    LOG(LS_VERBOSE) << "number of available audio output devices is "
-                    << nDevices;
+bool AudioDeviceLinuxALSA::AGC() const {
+  return _AGC;
+}
 
-    if (index > (nDevices-1))
-    {
-        LOG(LS_ERROR) << "device index is out of range [0," << (nDevices-1)
-                      << "]";
-        return -1;
-    }
+int32_t AudioDeviceLinuxALSA::MicrophoneVolumeIsAvailable(bool& available) {
+  bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
 
-    _outputDeviceIndex = index;
-    _outputDeviceIsSpecified = true;
-
+  // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+  if (!wasInitialized && InitMicrophone() == -1) {
+    // If we end up here it means that the selected microphone has no volume
+    // control.
+    available = false;
     return 0;
+  }
+
+  // Given that InitMicrophone was successful, we know that a volume control
+  // exists
+  available = true;
+
+  // Close the initialized input mixer
+  if (!wasInitialized) {
+    _mixerManager.CloseMicrophone();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetMicrophoneVolume(uint32_t volume) {
+  return (_mixerManager.SetMicrophoneVolume(volume));
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MicrophoneVolume(uint32_t& volume) const {
+  uint32_t level(0);
+
+  if (_mixerManager.MicrophoneVolume(level) == -1) {
+    LOG(LS_WARNING) << "failed to retrive current microphone level";
+    return -1;
+  }
+
+  volume = level;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+  uint32_t maxVol(0);
+
+  if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
+    return -1;
+  }
+
+  maxVolume = maxVol;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MinMicrophoneVolume(uint32_t& minVolume) const {
+  uint32_t minVol(0);
+
+  if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
+    return -1;
+  }
+
+  minVolume = minVol;
+
+  return 0;
+}
+
+int16_t AudioDeviceLinuxALSA::PlayoutDevices() {
+  return (int16_t)GetDevicesInfo(0, true);
+}
+
+int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(uint16_t index) {
+  if (_playIsInitialized) {
+    return -1;
+  }
+
+  uint32_t nDevices = GetDevicesInfo(0, true);
+  LOG(LS_VERBOSE) << "number of available audio output devices is " << nDevices;
+
+  if (index > (nDevices - 1)) {
+    LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                  << "]";
+    return -1;
+  }
+
+  _outputDeviceIndex = index;
+  _outputDeviceIsSpecified = true;
+
+  return 0;
 }
 
 int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(
-    AudioDeviceModule::WindowsDeviceType /*device*/)
-{
-    LOG(LS_ERROR) << "WindowsDeviceType not supported";
-    return -1;
+    AudioDeviceModule::WindowsDeviceType /*device*/) {
+  LOG(LS_ERROR) << "WindowsDeviceType not supported";
+  return -1;
 }
 
 int32_t AudioDeviceLinuxALSA::PlayoutDeviceName(
     uint16_t index,
     char name[kAdmMaxDeviceNameSize],
-    char guid[kAdmMaxGuidSize])
-{
+    char guid[kAdmMaxGuidSize]) {
+  const uint16_t nDevices(PlayoutDevices());
 
-    const uint16_t nDevices(PlayoutDevices());
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
 
-    if ((index > (nDevices-1)) || (name == NULL))
-    {
-        return -1;
-    }
+  memset(name, 0, kAdmMaxDeviceNameSize);
 
-    memset(name, 0, kAdmMaxDeviceNameSize);
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
 
-    if (guid != NULL)
-    {
-        memset(guid, 0, kAdmMaxGuidSize);
-    }
-
-    return GetDevicesInfo(1, true, index, name, kAdmMaxDeviceNameSize);
+  return GetDevicesInfo(1, true, index, name, kAdmMaxDeviceNameSize);
 }
 
 int32_t AudioDeviceLinuxALSA::RecordingDeviceName(
     uint16_t index,
     char name[kAdmMaxDeviceNameSize],
-    char guid[kAdmMaxGuidSize])
-{
+    char guid[kAdmMaxGuidSize]) {
+  const uint16_t nDevices(RecordingDevices());
 
-    const uint16_t nDevices(RecordingDevices());
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
 
-    if ((index > (nDevices-1)) || (name == NULL))
-    {
-        return -1;
-    }
+  memset(name, 0, kAdmMaxDeviceNameSize);
 
-    memset(name, 0, kAdmMaxDeviceNameSize);
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
 
-    if (guid != NULL)
-    {
-        memset(guid, 0, kAdmMaxGuidSize);
-    }
-
-    return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
+  return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
 }
 
-int16_t AudioDeviceLinuxALSA::RecordingDevices()
-{
-
-    return (int16_t)GetDevicesInfo(0, false);
+int16_t AudioDeviceLinuxALSA::RecordingDevices() {
+  return (int16_t)GetDevicesInfo(0, false);
 }
 
-int32_t AudioDeviceLinuxALSA::SetRecordingDevice(uint16_t index)
-{
+int32_t AudioDeviceLinuxALSA::SetRecordingDevice(uint16_t index) {
+  if (_recIsInitialized) {
+    return -1;
+  }
 
-    if (_recIsInitialized)
-    {
-        return -1;
-    }
+  uint32_t nDevices = GetDevicesInfo(0, false);
+  LOG(LS_VERBOSE) << "number of availiable audio input devices is " << nDevices;
 
-    uint32_t nDevices = GetDevicesInfo(0, false);
-    LOG(LS_VERBOSE) << "number of availiable audio input devices is "
-                    << nDevices;
+  if (index > (nDevices - 1)) {
+    LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                  << "]";
+    return -1;
+  }
 
-    if (index > (nDevices-1))
-    {
-        LOG(LS_ERROR) << "device index is out of range [0," << (nDevices-1)
-                      << "]";
-        return -1;
-    }
+  _inputDeviceIndex = index;
+  _inputDeviceIsSpecified = true;
 
-    _inputDeviceIndex = index;
-    _inputDeviceIsSpecified = true;
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -810,1124 +693,966 @@
 // ----------------------------------------------------------------------------
 
 int32_t AudioDeviceLinuxALSA::SetRecordingDevice(
-    AudioDeviceModule::WindowsDeviceType /*device*/)
-{
-    LOG(LS_ERROR) << "WindowsDeviceType not supported";
+    AudioDeviceModule::WindowsDeviceType /*device*/) {
+  LOG(LS_ERROR) << "WindowsDeviceType not supported";
+  return -1;
+}
+
+int32_t AudioDeviceLinuxALSA::PlayoutIsAvailable(bool& available) {
+  available = false;
+
+  // Try to initialize the playout side with mono
+  // Assumes the user sets the number of channels after calling this function.
+  _playChannels = 1;
+  int32_t res = InitPlayout();
+
+  // Cancel effect of initialization
+  StopPlayout();
+
+  if (res != -1) {
+    available = true;
+  } else {
+    // It may be possible to play out in stereo
+    res = StereoPlayoutIsAvailable(available);
+    if (available) {
+      // Then set channels to 2 so InitPlayout doesn't fail
+      _playChannels = 2;
+    }
+  }
+
+  return res;
+}
+
+int32_t AudioDeviceLinuxALSA::RecordingIsAvailable(bool& available) {
+  available = false;
+
+  // Try to initialize the recording side with mono
+  // Assumes the user sets the number of channels after calling this function.
+  _recChannels = 1;
+  int32_t res = InitRecording();
+
+  // Cancel effect of initialization
+  StopRecording();
+
+  if (res != -1) {
+    available = true;
+  } else {
+    // It may be possible to record in stereo
+    res = StereoRecordingIsAvailable(available);
+    if (available) {
+      // Then set channels to 2 so InitRecording doesn't fail
+      _recChannels = 2;
+    }
+  }
+
+  return res;
+}
+
+int32_t AudioDeviceLinuxALSA::InitPlayout() {
+  int errVal = 0;
+
+  rtc::CritScope lock(&_critSect);
+  if (_playing) {
     return -1;
-}
+  }
 
-int32_t AudioDeviceLinuxALSA::PlayoutIsAvailable(bool& available)
-{
+  if (!_outputDeviceIsSpecified) {
+    return -1;
+  }
 
-    available = false;
-
-    // Try to initialize the playout side with mono
-    // Assumes that user set num channels after calling this function
-    _playChannels = 1;
-    int32_t res = InitPlayout();
-
-    // Cancel effect of initialization
-    StopPlayout();
-
-    if (res != -1)
-    {
-        available = true;
-    }
-    else
-    {
-        // It may be possible to play out in stereo
-        res = StereoPlayoutIsAvailable(available);
-        if (available)
-        {
-            // Then set channels to 2 so InitPlayout doesn't fail
-            _playChannels = 2;
-        }
-    }
-
-    return res;
-}
-
-int32_t AudioDeviceLinuxALSA::RecordingIsAvailable(bool& available)
-{
-
-    available = false;
-
-    // Try to initialize the recording side with mono
-    // Assumes that user set num channels after calling this function
-    _recChannels = 1;
-    int32_t res = InitRecording();
-
-    // Cancel effect of initialization
-    StopRecording();
-
-    if (res != -1)
-    {
-        available = true;
-    }
-    else
-    {
-        // It may be possible to record in stereo
-        res = StereoRecordingIsAvailable(available);
-        if (available)
-        {
-            // Then set channels to 2 so InitPlayout doesn't fail
-            _recChannels = 2;
-        }
-    }
-
-    return res;
-}
-
-int32_t AudioDeviceLinuxALSA::InitPlayout()
-{
-
-    int errVal = 0;
-
-    rtc::CritScope lock(&_critSect);
-    if (_playing)
-    {
-        return -1;
-    }
-
-    if (!_outputDeviceIsSpecified)
-    {
-        return -1;
-    }
-
-    if (_playIsInitialized)
-    {
-        return 0;
-    }
-    // Initialize the speaker (devices might have been added or removed)
-    if (InitSpeaker() == -1)
-    {
-        LOG(LS_WARNING) << "InitSpeaker() failed";
-    }
-
-    // Start by closing any existing wave-output devices
-    //
-    if (_handlePlayout != NULL)
-    {
-        LATE(snd_pcm_close)(_handlePlayout);
-        _handlePlayout = NULL;
-        _playIsInitialized = false;
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR)
-                << "Error closing current playout sound device, error: "
-                << LATE(snd_strerror)(errVal);
-        }
-    }
-
-    // Open PCM device for playout
-    char deviceName[kAdmMaxDeviceNameSize] = {0};
-    GetDevicesInfo(2, true, _outputDeviceIndex, deviceName,
-                   kAdmMaxDeviceNameSize);
-
-    LOG(LS_VERBOSE) << "InitPlayout open (" << deviceName << ")";
-
-    errVal = LATE(snd_pcm_open)
-                 (&_handlePlayout,
-                  deviceName,
-                  SND_PCM_STREAM_PLAYBACK,
-                  SND_PCM_NONBLOCK);
-
-    if (errVal == -EBUSY) // Device busy - try some more!
-    {
-        for (int i=0; i < 5; i++)
-        {
-            SleepMs(1000);
-            errVal = LATE(snd_pcm_open)
-                         (&_handlePlayout,
-                          deviceName,
-                          SND_PCM_STREAM_PLAYBACK,
-                          SND_PCM_NONBLOCK);
-            if (errVal == 0)
-            {
-                break;
-            }
-        }
-    }
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "unable to open playback device: "
-                      << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
-        _handlePlayout = NULL;
-        return -1;
-    }
-
-    _playoutFramesIn10MS = _playoutFreq/100;
-    if ((errVal = LATE(snd_pcm_set_params)( _handlePlayout,
-#if defined(WEBRTC_ARCH_BIG_ENDIAN)
-        SND_PCM_FORMAT_S16_BE,
-#else
-        SND_PCM_FORMAT_S16_LE, //format
-#endif
-        SND_PCM_ACCESS_RW_INTERLEAVED, //access
-        _playChannels, //channels
-        _playoutFreq, //rate
-        1, //soft_resample
-        ALSA_PLAYOUT_LATENCY //40*1000 //latency required overall latency in us
-    )) < 0)
-    {   /* 0.5sec */
-        _playoutFramesIn10MS = 0;
-        LOG(LS_ERROR) << "unable to set playback device: "
-                      << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
-        ErrorRecovery(errVal, _handlePlayout);
-        errVal = LATE(snd_pcm_close)(_handlePlayout);
-        _handlePlayout = NULL;
-        return -1;
-    }
-
-    errVal = LATE(snd_pcm_get_params)(_handlePlayout,
-        &_playoutBufferSizeInFrame, &_playoutPeriodSizeInFrame);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_pcm_get_params: " << LATE(snd_strerror)(errVal)
-                      << " (" << errVal << ")";
-        _playoutBufferSizeInFrame = 0;
-        _playoutPeriodSizeInFrame = 0;
-    }
-    else {
-        LOG(LS_VERBOSE) << "playout snd_pcm_get_params buffer_size:"
-                        << _playoutBufferSizeInFrame << " period_size :"
-                        << _playoutPeriodSizeInFrame;
-    }
-
-    if (_ptrAudioBuffer)
-    {
-        // Update webrtc audio buffer with the selected parameters
-        _ptrAudioBuffer->SetPlayoutSampleRate(_playoutFreq);
-        _ptrAudioBuffer->SetPlayoutChannels(_playChannels);
-    }
-
-    // Set play buffer size
-    _playoutBufferSizeIn10MS = LATE(snd_pcm_frames_to_bytes)(
-        _handlePlayout, _playoutFramesIn10MS);
-
-    // Init varaibles used for play
-
-    if (_handlePlayout != NULL)
-    {
-        _playIsInitialized = true;
-        return 0;
-    }
-    else
-    {
-        return -1;
-    }
-
+  if (_playIsInitialized) {
     return 0;
-}
+  }
+  // Initialize the speaker (devices might have been added or removed)
+  if (InitSpeaker() == -1) {
+    LOG(LS_WARNING) << "InitSpeaker() failed";
+  }
 
-int32_t AudioDeviceLinuxALSA::InitRecording()
-{
-
-    int errVal = 0;
-
-    rtc::CritScope lock(&_critSect);
-
-    if (_recording)
-    {
-        return -1;
+  // Start by closing any existing pcm playout devices
+  //
+  if (_handlePlayout != NULL) {
+    LATE(snd_pcm_close)(_handlePlayout);
+    _handlePlayout = NULL;
+    _playIsInitialized = false;
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error closing current playout sound device, error: "
+                    << LATE(snd_strerror)(errVal);
     }
+  }
 
-    if (!_inputDeviceIsSpecified)
-    {
-        return -1;
-    }
+  // Open PCM device for playout
+  char deviceName[kAdmMaxDeviceNameSize] = {0};
+  GetDevicesInfo(2, true, _outputDeviceIndex, deviceName,
+                 kAdmMaxDeviceNameSize);
 
-    if (_recIsInitialized)
-    {
-        return 0;
-    }
+  LOG(LS_VERBOSE) << "InitPlayout open (" << deviceName << ")";
 
-    // Initialize the microphone (devices might have been added or removed)
-    if (InitMicrophone() == -1)
-    {
-        LOG(LS_WARNING) << "InitMicrophone() failed";
-    }
+  errVal = LATE(snd_pcm_open)(&_handlePlayout, deviceName,
+                              SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK);
 
-    // Start by closing any existing pcm-input devices
-    //
-    if (_handleRecord != NULL)
-    {
-        int errVal = LATE(snd_pcm_close)(_handleRecord);
-        _handleRecord = NULL;
-        _recIsInitialized = false;
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR)
-                << "Error closing current recording sound device, error: "
-                << LATE(snd_strerror)(errVal);
-        }
-    }
-
-    // Open PCM device for recording
-    // The corresponding settings for playout are made after the record settings
-    char deviceName[kAdmMaxDeviceNameSize] = {0};
-    GetDevicesInfo(2, false, _inputDeviceIndex, deviceName,
-                   kAdmMaxDeviceNameSize);
-
-    LOG(LS_VERBOSE) << "InitRecording open (" << deviceName << ")";
-    errVal = LATE(snd_pcm_open)
-                 (&_handleRecord,
-                  deviceName,
-                  SND_PCM_STREAM_CAPTURE,
-                  SND_PCM_NONBLOCK);
-
-    // Available modes: 0 = blocking, SND_PCM_NONBLOCK, SND_PCM_ASYNC
-    if (errVal == -EBUSY) // Device busy - try some more!
-    {
-        for (int i=0; i < 5; i++)
-        {
-            SleepMs(1000);
-            errVal = LATE(snd_pcm_open)
-                         (&_handleRecord,
-                          deviceName,
-                          SND_PCM_STREAM_CAPTURE,
-                          SND_PCM_NONBLOCK);
-            if (errVal == 0)
-            {
-                break;
-            }
-        }
-    }
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "unable to open record device: "
-                      << LATE(snd_strerror)(errVal);
-        _handleRecord = NULL;
-        return -1;
-    }
-
-    _recordingFramesIn10MS = _recordingFreq/100;
-    if ((errVal = LATE(snd_pcm_set_params)(_handleRecord,
-#if defined(WEBRTC_ARCH_BIG_ENDIAN)
-        SND_PCM_FORMAT_S16_BE, //format
-#else
-        SND_PCM_FORMAT_S16_LE, //format
-#endif
-        SND_PCM_ACCESS_RW_INTERLEAVED, //access
-        _recChannels, //channels
-        _recordingFreq, //rate
-        1, //soft_resample
-        ALSA_CAPTURE_LATENCY //latency in us
-    )) < 0)
-    {
-         // Fall back to another mode then.
-         if (_recChannels == 1)
-           _recChannels = 2;
-         else
-           _recChannels = 1;
-
-         if ((errVal = LATE(snd_pcm_set_params)(_handleRecord,
-#if defined(WEBRTC_ARCH_BIG_ENDIAN)
-             SND_PCM_FORMAT_S16_BE, //format
-#else
-             SND_PCM_FORMAT_S16_LE, //format
-#endif
-             SND_PCM_ACCESS_RW_INTERLEAVED, //access
-             _recChannels, //channels
-             _recordingFreq, //rate
-             1, //soft_resample
-             ALSA_CAPTURE_LATENCY //latency in us
-         )) < 0)
-         {
-             _recordingFramesIn10MS = 0;
-             LOG(LS_ERROR) << "unable to set record settings: "
-                           << LATE(snd_strerror)(errVal) << " (" << errVal
-                           << ")";
-             ErrorRecovery(errVal, _handleRecord);
-             errVal = LATE(snd_pcm_close)(_handleRecord);
-             _handleRecord = NULL;
-             return -1;
-         }
-    }
-
-    errVal = LATE(snd_pcm_get_params)(_handleRecord,
-        &_recordingBuffersizeInFrame, &_recordingPeriodSizeInFrame);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_pcm_get_params " << LATE(snd_strerror)(errVal)
-                      << " (" << errVal << ")";
-        _recordingBuffersizeInFrame = 0;
-        _recordingPeriodSizeInFrame = 0;
-    }
-    else {
-        LOG(LS_VERBOSE) << "capture snd_pcm_get_params, buffer_size:"
-                        << _recordingBuffersizeInFrame << ", period_size:"
-                        << _recordingPeriodSizeInFrame;
-    }
-
-    if (_ptrAudioBuffer)
-    {
-        // Update webrtc audio buffer with the selected parameters
-        _ptrAudioBuffer->SetRecordingSampleRate(_recordingFreq);
-        _ptrAudioBuffer->SetRecordingChannels(_recChannels);
-    }
-
-    // Set rec buffer size and create buffer
-    _recordingBufferSizeIn10MS = LATE(snd_pcm_frames_to_bytes)(
-        _handleRecord, _recordingFramesIn10MS);
-
-    if (_handleRecord != NULL)
-    {
-        // Mark recording side as initialized
-        _recIsInitialized = true;
-        return 0;
-    }
-    else
-    {
-        return -1;
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::StartRecording()
-{
-
-    if (!_recIsInitialized)
-    {
-        return -1;
-    }
-
-    if (_recording)
-    {
-        return 0;
-    }
-
-    _recording = true;
-
-    int errVal = 0;
-    _recordingFramesLeft = _recordingFramesIn10MS;
-
-    // Make sure we only create the buffer once.
-    if (!_recordingBuffer)
-        _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
-    if (!_recordingBuffer)
-    {
-        LOG(LS_ERROR) << "failed to alloc recording buffer";
-        _recording = false;
-        return -1;
-    }
-    // RECORDING
-    _ptrThreadRec.reset(new rtc::PlatformThread(
-        RecThreadFunc, this, "webrtc_audio_module_capture_thread"));
-
-    _ptrThreadRec->Start();
-    _ptrThreadRec->SetPriority(rtc::kRealtimePriority);
-
-    errVal = LATE(snd_pcm_prepare)(_handleRecord);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "capture snd_pcm_prepare failed ("
-                      << LATE(snd_strerror)(errVal) << ")\n";
-        // just log error
-        // if snd_pcm_open fails will return -1
-    }
-
-    errVal = LATE(snd_pcm_start)(_handleRecord);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "capture snd_pcm_start err: "
-                      << LATE(snd_strerror)(errVal);
-        errVal = LATE(snd_pcm_start)(_handleRecord);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "capture snd_pcm_start 2nd try err: "
-                          << LATE(snd_strerror)(errVal);
-            StopRecording();
-            return -1;
-        }
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::StopRecording()
-{
-
-    {
-      rtc::CritScope lock(&_critSect);
-
-      if (!_recIsInitialized)
-      {
-          return 0;
+  if (errVal == -EBUSY)  // Device busy - try some more!
+  {
+    for (int i = 0; i < 5; i++) {
+      SleepMs(1000);
+      errVal = LATE(snd_pcm_open)(&_handlePlayout, deviceName,
+                                  SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK);
+      if (errVal == 0) {
+        break;
       }
-
-      if (_handleRecord == NULL)
-      {
-          return -1;
-      }
-
-      // Make sure we don't start recording (it's asynchronous).
-      _recIsInitialized = false;
-      _recording = false;
     }
+  }
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "unable to open playback device: "
+                  << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+    _handlePlayout = NULL;
+    return -1;
+  }
 
-    if (_ptrThreadRec)
-    {
-        _ptrThreadRec->Stop();
-        _ptrThreadRec.reset();
-    }
+  _playoutFramesIn10MS = _playoutFreq / 100;
+  if ((errVal = LATE(snd_pcm_set_params)(
+           _handlePlayout,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+           SND_PCM_FORMAT_S16_BE,
+#else
+           SND_PCM_FORMAT_S16_LE,                             // format
+#endif
+           SND_PCM_ACCESS_RW_INTERLEAVED,  // access
+           _playChannels,                  // channels
+           _playoutFreq,                   // rate
+           1,                              // soft_resample
+           ALSA_PLAYOUT_LATENCY  // 40*1000; required overall
+                                 // latency in us
+           )) < 0) {             /* 0.5sec */
+    _playoutFramesIn10MS = 0;
+    LOG(LS_ERROR) << "unable to set playback device: "
+                  << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+    ErrorRecovery(errVal, _handlePlayout);
+    errVal = LATE(snd_pcm_close)(_handlePlayout);
+    _handlePlayout = NULL;
+    return -1;
+  }
 
-    rtc::CritScope lock(&_critSect);
-    _recordingFramesLeft = 0;
-    if (_recordingBuffer)
-    {
-        delete [] _recordingBuffer;
-        _recordingBuffer = NULL;
-    }
+  errVal = LATE(snd_pcm_get_params)(_handlePlayout, &_playoutBufferSizeInFrame,
+                                    &_playoutPeriodSizeInFrame);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_pcm_get_params: " << LATE(snd_strerror)(errVal)
+                  << " (" << errVal << ")";
+    _playoutBufferSizeInFrame = 0;
+    _playoutPeriodSizeInFrame = 0;
+  } else {
+    LOG(LS_VERBOSE) << "playout snd_pcm_get_params buffer_size:"
+                    << _playoutBufferSizeInFrame
+                    << " period_size :" << _playoutPeriodSizeInFrame;
+  }
 
-    // Stop and close pcm recording device.
-    int errVal = LATE(snd_pcm_drop)(_handleRecord);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error stop recording: " << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  if (_ptrAudioBuffer) {
+    // Update webrtc audio buffer with the selected parameters
+    _ptrAudioBuffer->SetPlayoutSampleRate(_playoutFreq);
+    _ptrAudioBuffer->SetPlayoutChannels(_playChannels);
+  }
 
-    errVal = LATE(snd_pcm_close)(_handleRecord);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error closing record sound device, error: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  // Set play buffer size
+  _playoutBufferSizeIn10MS =
+      LATE(snd_pcm_frames_to_bytes)(_handlePlayout, _playoutFramesIn10MS);
 
-    // Check if we have muted and unmute if so.
-    bool muteEnabled = false;
-    MicrophoneMute(muteEnabled);
-    if (muteEnabled)
-    {
-        SetMicrophoneMute(false);
-    }
+  // Init variables used for playout
 
-    // set the pcm input handle to NULL
+  if (_handlePlayout != NULL) {
+    _playIsInitialized = true;
+    return 0;
+  } else {
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::InitRecording() {
+  int errVal = 0;
+
+  rtc::CritScope lock(&_critSect);
+
+  if (_recording) {
+    return -1;
+  }
+
+  if (!_inputDeviceIsSpecified) {
+    return -1;
+  }
+
+  if (_recIsInitialized) {
+    return 0;
+  }
+
+  // Initialize the microphone (devices might have been added or removed)
+  if (InitMicrophone() == -1) {
+    LOG(LS_WARNING) << "InitMicrophone() failed";
+  }
+
+  // Start by closing any existing pcm-input devices
+  //
+  if (_handleRecord != NULL) {
+    int errVal = LATE(snd_pcm_close)(_handleRecord);
     _handleRecord = NULL;
+    _recIsInitialized = false;
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error closing current recording sound device, error: "
+                    << LATE(snd_strerror)(errVal);
+    }
+  }
+
+  // Open PCM device for recording
+  // The corresponding settings for playout are made after the record settings
+  char deviceName[kAdmMaxDeviceNameSize] = {0};
+  GetDevicesInfo(2, false, _inputDeviceIndex, deviceName,
+                 kAdmMaxDeviceNameSize);
+
+  LOG(LS_VERBOSE) << "InitRecording open (" << deviceName << ")";
+  errVal = LATE(snd_pcm_open)(&_handleRecord, deviceName,
+                              SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK);
+
+  // Available modes: 0 = blocking, SND_PCM_NONBLOCK, SND_PCM_ASYNC
+  if (errVal == -EBUSY)  // Device busy - try some more!
+  {
+    for (int i = 0; i < 5; i++) {
+      SleepMs(1000);
+      errVal = LATE(snd_pcm_open)(&_handleRecord, deviceName,
+                                  SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK);
+      if (errVal == 0) {
+        break;
+      }
+    }
+  }
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "unable to open record device: "
+                  << LATE(snd_strerror)(errVal);
+    _handleRecord = NULL;
+    return -1;
+  }
+
+  _recordingFramesIn10MS = _recordingFreq / 100;
+  if ((errVal =
+           LATE(snd_pcm_set_params)(_handleRecord,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+                                    SND_PCM_FORMAT_S16_BE,  // format
+#else
+                                    SND_PCM_FORMAT_S16_LE,    // format
+#endif
+                                    SND_PCM_ACCESS_RW_INTERLEAVED,  // access
+                                    _recChannels,                   // channels
+                                    _recordingFreq,                 // rate
+                                    1,                    // soft_resample
+                                    ALSA_CAPTURE_LATENCY  // latency in us
+                                    )) < 0) {
+    // Fall back to another mode then.
+    if (_recChannels == 1)
+      _recChannels = 2;
+    else
+      _recChannels = 1;
+
+    if ((errVal =
+             LATE(snd_pcm_set_params)(_handleRecord,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+                                      SND_PCM_FORMAT_S16_BE,  // format
+#else
+                                      SND_PCM_FORMAT_S16_LE,  // format
+#endif
+                                      SND_PCM_ACCESS_RW_INTERLEAVED,  // access
+                                      _recChannels,         // channels
+                                      _recordingFreq,       // rate
+                                      1,                    // soft_resample
+                                      ALSA_CAPTURE_LATENCY  // latency in us
+                                      )) < 0) {
+      _recordingFramesIn10MS = 0;
+      LOG(LS_ERROR) << "unable to set record settings: "
+                    << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+      ErrorRecovery(errVal, _handleRecord);
+      errVal = LATE(snd_pcm_close)(_handleRecord);
+      _handleRecord = NULL;
+      return -1;
+    }
+  }
+
+  errVal = LATE(snd_pcm_get_params)(_handleRecord, &_recordingBuffersizeInFrame,
+                                    &_recordingPeriodSizeInFrame);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_pcm_get_params " << LATE(snd_strerror)(errVal) << " ("
+                  << errVal << ")";
+    _recordingBuffersizeInFrame = 0;
+    _recordingPeriodSizeInFrame = 0;
+  } else {
+    LOG(LS_VERBOSE) << "capture snd_pcm_get_params, buffer_size:"
+                    << _recordingBuffersizeInFrame
+                    << ", period_size:" << _recordingPeriodSizeInFrame;
+  }
+
+  if (_ptrAudioBuffer) {
+    // Update webrtc audio buffer with the selected parameters
+    _ptrAudioBuffer->SetRecordingSampleRate(_recordingFreq);
+    _ptrAudioBuffer->SetRecordingChannels(_recChannels);
+  }
+
+  // Set rec buffer size and create buffer
+  _recordingBufferSizeIn10MS =
+      LATE(snd_pcm_frames_to_bytes)(_handleRecord, _recordingFramesIn10MS);
+
+  if (_handleRecord != NULL) {
+    // Mark recording side as initialized
+    _recIsInitialized = true;
     return 0;
+  } else {
+    return -1;
+  }
+
+  return 0;
 }
 
-bool AudioDeviceLinuxALSA::RecordingIsInitialized() const
-{
-    return (_recIsInitialized);
+int32_t AudioDeviceLinuxALSA::StartRecording() {
+  if (!_recIsInitialized) {
+    return -1;
+  }
+
+  if (_recording) {
+    return 0;
+  }
+
+  _recording = true;
+
+  int errVal = 0;
+  _recordingFramesLeft = _recordingFramesIn10MS;
+
+  // Make sure we only create the buffer once.
+  if (!_recordingBuffer)
+    _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
+  if (!_recordingBuffer) {
+    LOG(LS_ERROR) << "failed to alloc recording buffer";
+    _recording = false;
+    return -1;
+  }
+  // RECORDING
+  _ptrThreadRec.reset(new rtc::PlatformThread(
+      RecThreadFunc, this, "webrtc_audio_module_capture_thread"));
+
+  _ptrThreadRec->Start();
+  _ptrThreadRec->SetPriority(rtc::kRealtimePriority);
+
+  errVal = LATE(snd_pcm_prepare)(_handleRecord);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "capture snd_pcm_prepare failed ("
+                  << LATE(snd_strerror)(errVal) << ")\n";
+    // just log error
+    // if snd_pcm_open fails will return -1
+  }
+
+  errVal = LATE(snd_pcm_start)(_handleRecord);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "capture snd_pcm_start err: "
+                  << LATE(snd_strerror)(errVal);
+    errVal = LATE(snd_pcm_start)(_handleRecord);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "capture snd_pcm_start 2nd try err: "
+                    << LATE(snd_strerror)(errVal);
+      StopRecording();
+      return -1;
+    }
+  }
+
+  return 0;
 }
 
-bool AudioDeviceLinuxALSA::Recording() const
-{
-    return (_recording);
-}
+int32_t AudioDeviceLinuxALSA::StopRecording() {
+  {
+    rtc::CritScope lock(&_critSect);
 
-bool AudioDeviceLinuxALSA::PlayoutIsInitialized() const
-{
-    return (_playIsInitialized);
-}
-
-int32_t AudioDeviceLinuxALSA::StartPlayout()
-{
-    if (!_playIsInitialized)
-    {
-        return -1;
+    if (!_recIsInitialized) {
+      return 0;
     }
 
-    if (_playing)
-    {
-        return 0;
-    }
-
-    _playing = true;
-
-    _playoutFramesLeft = 0;
-    if (!_playoutBuffer)
-        _playoutBuffer = new int8_t[_playoutBufferSizeIn10MS];
-    if (!_playoutBuffer)
-    {
-      LOG(LS_ERROR) << "failed to alloc playout buf";
-      _playing = false;
+    if (_handleRecord == NULL) {
       return -1;
     }
 
-    // PLAYOUT
-    _ptrThreadPlay.reset(new rtc::PlatformThread(
-        PlayThreadFunc, this, "webrtc_audio_module_play_thread"));
-    _ptrThreadPlay->Start();
-    _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);
+    // Make sure we don't start recording (it's asynchronous).
+    _recIsInitialized = false;
+    _recording = false;
+  }
 
-    int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "playout snd_pcm_prepare failed ("
-                      << LATE(snd_strerror)(errVal) << ")\n";
-        // just log error
-        // if snd_pcm_open fails will return -1
-    }
+  if (_ptrThreadRec) {
+    _ptrThreadRec->Stop();
+    _ptrThreadRec.reset();
+  }
 
-    return 0;
+  rtc::CritScope lock(&_critSect);
+  _recordingFramesLeft = 0;
+  if (_recordingBuffer) {
+    delete[] _recordingBuffer;
+    _recordingBuffer = NULL;
+  }
+
+  // Stop and close pcm recording device.
+  int errVal = LATE(snd_pcm_drop)(_handleRecord);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error stop recording: " << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+
+  errVal = LATE(snd_pcm_close)(_handleRecord);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error closing record sound device, error: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+
+  // Check if we have muted and unmute if so.
+  bool muteEnabled = false;
+  MicrophoneMute(muteEnabled);
+  if (muteEnabled) {
+    SetMicrophoneMute(false);
+  }
+
+  // set the pcm input handle to NULL
+  _handleRecord = NULL;
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::StopPlayout()
-{
+bool AudioDeviceLinuxALSA::RecordingIsInitialized() const {
+  return (_recIsInitialized);
+}
 
-    {
-        rtc::CritScope lock(&_critSect);
+bool AudioDeviceLinuxALSA::Recording() const {
+  return (_recording);
+}
 
-        if (!_playIsInitialized)
-        {
-            return 0;
-        }
+bool AudioDeviceLinuxALSA::PlayoutIsInitialized() const {
+  return (_playIsInitialized);
+}
 
-        if (_handlePlayout == NULL)
-        {
-            return -1;
-        }
+int32_t AudioDeviceLinuxALSA::StartPlayout() {
+  if (!_playIsInitialized) {
+    return -1;
+  }
 
-        _playing = false;
-    }
+  if (_playing) {
+    return 0;
+  }
 
-    // stop playout thread first
-    if (_ptrThreadPlay)
-    {
-        _ptrThreadPlay->Stop();
-        _ptrThreadPlay.reset();
-    }
+  _playing = true;
 
+  _playoutFramesLeft = 0;
+  if (!_playoutBuffer)
+    _playoutBuffer = new int8_t[_playoutBufferSizeIn10MS];
+  if (!_playoutBuffer) {
+    LOG(LS_ERROR) << "failed to alloc playout buf";
+    _playing = false;
+    return -1;
+  }
+
+  // PLAYOUT
+  _ptrThreadPlay.reset(new rtc::PlatformThread(
+      PlayThreadFunc, this, "webrtc_audio_module_play_thread"));
+  _ptrThreadPlay->Start();
+  _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);
+
+  int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "playout snd_pcm_prepare failed ("
+                  << LATE(snd_strerror)(errVal) << ")\n";
+    // just log error
+    // if snd_pcm_open fails will return -1
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StopPlayout() {
+  {
     rtc::CritScope lock(&_critSect);
 
-    _playoutFramesLeft = 0;
-    delete [] _playoutBuffer;
-    _playoutBuffer = NULL;
-
-    // stop and close pcm playout device
-    int errVal = LATE(snd_pcm_drop)(_handlePlayout);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error stop playing: " << LATE(snd_strerror)(errVal);
+    if (!_playIsInitialized) {
+      return 0;
     }
 
-    errVal = LATE(snd_pcm_close)(_handlePlayout);
-     if (errVal < 0)
-         LOG(LS_ERROR) << "Error closing playout sound device, error: "
-                       << LATE(snd_strerror)(errVal);
+    if (_handlePlayout == NULL) {
+      return -1;
+    }
 
-     // set the pcm input handle to NULL
-     _playIsInitialized = false;
-     _handlePlayout = NULL;
-     LOG(LS_VERBOSE) << "handle_playout is now set to NULL";
+    _playing = false;
+  }
 
-     return 0;
+  // stop playout thread first
+  if (_ptrThreadPlay) {
+    _ptrThreadPlay->Stop();
+    _ptrThreadPlay.reset();
+  }
+
+  rtc::CritScope lock(&_critSect);
+
+  _playoutFramesLeft = 0;
+  delete[] _playoutBuffer;
+  _playoutBuffer = NULL;
+
+  // stop and close pcm playout device
+  int errVal = LATE(snd_pcm_drop)(_handlePlayout);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error stop playing: " << LATE(snd_strerror)(errVal);
+  }
+
+  errVal = LATE(snd_pcm_close)(_handlePlayout);
+  if (errVal < 0)
+    LOG(LS_ERROR) << "Error closing playout sound device, error: "
+                  << LATE(snd_strerror)(errVal);
+
+  // Set the pcm playout handle to NULL
+  _playIsInitialized = false;
+  _handlePlayout = NULL;
+  LOG(LS_VERBOSE) << "handle_playout is now set to NULL";
+
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::PlayoutDelay(uint16_t& delayMS) const
-{
-    delayMS = (uint16_t)_playoutDelay * 1000 / _playoutFreq;
-    return 0;
+int32_t AudioDeviceLinuxALSA::PlayoutDelay(uint16_t& delayMS) const {
+  delayMS = (uint16_t)_playoutDelay * 1000 / _playoutFreq;
+  return 0;
 }
 
-bool AudioDeviceLinuxALSA::Playing() const
-{
-    return (_playing);
+bool AudioDeviceLinuxALSA::Playing() const {
+  return (_playing);
 }
 
 // ============================================================================
 //                                 Private Methods
 // ============================================================================
 
-int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
-    const int32_t function,
-    const bool playback,
-    const int32_t enumDeviceNo,
-    char* enumDeviceName,
-    const int32_t ednLen) const
-{
+int32_t AudioDeviceLinuxALSA::GetDevicesInfo(const int32_t function,
+                                             const bool playback,
+                                             const int32_t enumDeviceNo,
+                                             char* enumDeviceName,
+                                             const int32_t ednLen) const {
+  // Device enumeration based on libjingle implementation
+  // by Tristan Schmelcher at Google Inc.
 
-    // Device enumeration based on libjingle implementation
-    // by Tristan Schmelcher at Google Inc.
+  const char* type = playback ? "Output" : "Input";
+  // dmix and dsnoop are only for playback and capture, respectively, but ALSA
+  // stupidly includes them in both lists.
+  const char* ignorePrefix = playback ? "dsnoop:" : "dmix:";
+  // (ALSA lists many more "devices" of questionable interest, but we show them
+  // just in case the weird devices may actually be desirable for some
+  // users/systems.)
 
-    const char *type = playback ? "Output" : "Input";
-    // dmix and dsnoop are only for playback and capture, respectively, but ALSA
-    // stupidly includes them in both lists.
-    const char *ignorePrefix = playback ? "dsnoop:" : "dmix:" ;
-    // (ALSA lists many more "devices" of questionable interest, but we show them
-    // just in case the weird devices may actually be desirable for some
-    // users/systems.)
+  int err;
+  int enumCount(0);
+  bool keepSearching(true);
 
-    int err;
-    int enumCount(0);
-    bool keepSearching(true);
-
-    // From Chromium issue 95797
-    // Loop through the sound cards to get Alsa device hints.
-    // Don't use snd_device_name_hint(-1,..) since there is a access violation
-    // inside this ALSA API with libasound.so.2.0.0.
-    int card = -1;
-    while (!(LATE(snd_card_next)(&card)) && (card >= 0) && keepSearching) {
-        void **hints;
-        err = LATE(snd_device_name_hint)(card, "pcm", &hints);
-        if (err != 0)
-        {
-            LOG(LS_ERROR) << "GetDevicesInfo - device name hint error: "
-                          << LATE(snd_strerror)(err);
-            return -1;
-        }
-
-        enumCount++; // default is 0
-        if ((function == FUNC_GET_DEVICE_NAME ||
-            function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) && enumDeviceNo == 0)
-        {
-            strcpy(enumDeviceName, "default");
-
-            err = LATE(snd_device_name_free_hint)(hints);
-            if (err != 0)
-            {
-                LOG(LS_ERROR)
-                    << "GetDevicesInfo - device name free hint error: "
+  // From Chromium issue 95797
+  // Loop through the sound cards to get Alsa device hints.
+  // Don't use snd_device_name_hint(-1,..) since there is an access violation
+  // inside this ALSA API with libasound.so.2.0.0.
+  int card = -1;
+  while (!(LATE(snd_card_next)(&card)) && (card >= 0) && keepSearching) {
+    void** hints;
+    err = LATE(snd_device_name_hint)(card, "pcm", &hints);
+    if (err != 0) {
+      LOG(LS_ERROR) << "GetDevicesInfo - device name hint error: "
                     << LATE(snd_strerror)(err);
-            }
-
-            return 0;
-        }
-
-        for (void **list = hints; *list != NULL; ++list)
-        {
-            char *actualType = LATE(snd_device_name_get_hint)(*list, "IOID");
-            if (actualType)
-            {   // NULL means it's both.
-                bool wrongType = (strcmp(actualType, type) != 0);
-                free(actualType);
-                if (wrongType)
-                {
-                    // Wrong type of device (i.e., input vs. output).
-                    continue;
-                }
-            }
-
-            char *name = LATE(snd_device_name_get_hint)(*list, "NAME");
-            if (!name)
-            {
-                LOG(LS_ERROR) << "Device has no name";
-                // Skip it.
-                continue;
-            }
-
-            // Now check if we actually want to show this device.
-            if (strcmp(name, "default") != 0 &&
-                strcmp(name, "null") != 0 &&
-                strcmp(name, "pulse") != 0 &&
-                strncmp(name, ignorePrefix, strlen(ignorePrefix)) != 0)
-            {
-                // Yes, we do.
-                char *desc = LATE(snd_device_name_get_hint)(*list, "DESC");
-                if (!desc)
-                {
-                    // Virtual devices don't necessarily have descriptions.
-                    // Use their names instead.
-                    desc = name;
-                }
-
-                if (FUNC_GET_NUM_OF_DEVICE == function)
-                {
-                    LOG(LS_VERBOSE) << "Enum device " << enumCount << " - "
-                                    << name;
-
-                }
-                if ((FUNC_GET_DEVICE_NAME == function) &&
-                    (enumDeviceNo == enumCount))
-                {
-                    // We have found the enum device, copy the name to buffer.
-                    strncpy(enumDeviceName, desc, ednLen);
-                    enumDeviceName[ednLen-1] = '\0';
-                    keepSearching = false;
-                    // Replace '\n' with '-'.
-                    char * pret = strchr(enumDeviceName, '\n'/*0xa*/); //LF
-                    if (pret)
-                        *pret = '-';
-                }
-                if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
-                    (enumDeviceNo == enumCount))
-                {
-                    // We have found the enum device, copy the name to buffer.
-                    strncpy(enumDeviceName, name, ednLen);
-                    enumDeviceName[ednLen-1] = '\0';
-                    keepSearching = false;
-                }
-
-                if (keepSearching)
-                    ++enumCount;
-
-                if (desc != name)
-                    free(desc);
-            }
-
-            free(name);
-
-            if (!keepSearching)
-                break;
-        }
-
-        err = LATE(snd_device_name_free_hint)(hints);
-        if (err != 0)
-        {
-            LOG(LS_ERROR) << "GetDevicesInfo - device name free hint error: "
-                          << LATE(snd_strerror)(err);
-            // Continue and return true anyway, since we did get the whole list.
-        }
+      return -1;
     }
 
-    if (FUNC_GET_NUM_OF_DEVICE == function)
-    {
-        if (enumCount == 1) // only default?
-            enumCount = 0;
-        return enumCount; // Normal return point for function 0
+    enumCount++;  // default is 0
+    if ((function == FUNC_GET_DEVICE_NAME ||
+         function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) &&
+        enumDeviceNo == 0) {
+      strcpy(enumDeviceName, "default");
+
+      err = LATE(snd_device_name_free_hint)(hints);
+      if (err != 0) {
+        LOG(LS_ERROR) << "GetDevicesInfo - device name free hint error: "
+                      << LATE(snd_strerror)(err);
+      }
+
+      return 0;
     }
 
-    if (keepSearching)
-    {
-        // If we get here for function 1 and 2, we didn't find the specified
-        // enum device.
-        LOG(LS_ERROR)
-            << "GetDevicesInfo - Could not find device name or numbers";
-        return -1;
+    for (void** list = hints; *list != NULL; ++list) {
+      char* actualType = LATE(snd_device_name_get_hint)(*list, "IOID");
+      if (actualType) {  // NULL means it's both.
+        bool wrongType = (strcmp(actualType, type) != 0);
+        free(actualType);
+        if (wrongType) {
+          // Wrong type of device (i.e., input vs. output).
+          continue;
+        }
+      }
+
+      char* name = LATE(snd_device_name_get_hint)(*list, "NAME");
+      if (!name) {
+        LOG(LS_ERROR) << "Device has no name";
+        // Skip it.
+        continue;
+      }
+
+      // Now check if we actually want to show this device.
+      if (strcmp(name, "default") != 0 && strcmp(name, "null") != 0 &&
+          strcmp(name, "pulse") != 0 &&
+          strncmp(name, ignorePrefix, strlen(ignorePrefix)) != 0) {
+        // Yes, we do.
+        char* desc = LATE(snd_device_name_get_hint)(*list, "DESC");
+        if (!desc) {
+          // Virtual devices don't necessarily have descriptions.
+          // Use their names instead.
+          desc = name;
+        }
+
+        if (FUNC_GET_NUM_OF_DEVICE == function) {
+          LOG(LS_VERBOSE) << "Enum device " << enumCount << " - " << name;
+        }
+        if ((FUNC_GET_DEVICE_NAME == function) && (enumDeviceNo == enumCount)) {
+          // We have found the enum device, copy the name to buffer.
+          strncpy(enumDeviceName, desc, ednLen);
+          enumDeviceName[ednLen - 1] = '\0';
+          keepSearching = false;
+          // Replace '\n' with '-'.
+          char* pret = strchr(enumDeviceName, '\n' /*0xa*/);  // LF
+          if (pret)
+            *pret = '-';
+        }
+        if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
+            (enumDeviceNo == enumCount)) {
+          // We have found the enum device, copy the name to buffer.
+          strncpy(enumDeviceName, name, ednLen);
+          enumDeviceName[ednLen - 1] = '\0';
+          keepSearching = false;
+        }
+
+        if (keepSearching)
+          ++enumCount;
+
+        if (desc != name)
+          free(desc);
+      }
+
+      free(name);
+
+      if (!keepSearching)
+        break;
     }
 
-    return 0;
+    err = LATE(snd_device_name_free_hint)(hints);
+    if (err != 0) {
+      LOG(LS_ERROR) << "GetDevicesInfo - device name free hint error: "
+                    << LATE(snd_strerror)(err);
+      // Continue and return true anyway, since we did get the whole list.
+    }
+  }
+
+  if (FUNC_GET_NUM_OF_DEVICE == function) {
+    if (enumCount == 1)  // only default?
+      enumCount = 0;
+    return enumCount;  // Normal return point for function 0
+  }
+
+  if (keepSearching) {
+    // If we get here for function 1 and 2, we didn't find the specified
+    // enum device.
+    LOG(LS_ERROR) << "GetDevicesInfo - Could not find device name or numbers";
+    return -1;
+  }
+
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::InputSanityCheckAfterUnlockedPeriod() const
-{
-    if (_handleRecord == NULL)
-    {
-        LOG(LS_ERROR) << "input state has been modified during unlocked period";
-        return -1;
-    }
-    return 0;
+int32_t AudioDeviceLinuxALSA::InputSanityCheckAfterUnlockedPeriod() const {
+  if (_handleRecord == NULL) {
+    LOG(LS_ERROR) << "input state has been modified during unlocked period";
+    return -1;
+  }
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::OutputSanityCheckAfterUnlockedPeriod() const
-{
-    if (_handlePlayout == NULL)
-    {
-        LOG(LS_ERROR)
-            << "output state has been modified during unlocked period";
-        return -1;
-    }
-    return 0;
+int32_t AudioDeviceLinuxALSA::OutputSanityCheckAfterUnlockedPeriod() const {
+  if (_handlePlayout == NULL) {
+    LOG(LS_ERROR) << "output state has been modified during unlocked period";
+    return -1;
+  }
+  return 0;
 }
 
 int32_t AudioDeviceLinuxALSA::ErrorRecovery(int32_t error,
-                                            snd_pcm_t* deviceHandle)
-{
-    int st = LATE(snd_pcm_state)(deviceHandle);
-    LOG(LS_VERBOSE) << "Trying to recover from "
-         << ((LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE)
-         ? "capture" : "playout") << " error: " << LATE(snd_strerror)(error)
-         << " (" << error << ") (state " << st << ")";
+                                            snd_pcm_t* deviceHandle) {
+  int st = LATE(snd_pcm_state)(deviceHandle);
+  LOG(LS_VERBOSE) << "Trying to recover from "
+                  << ((LATE(snd_pcm_stream)(deviceHandle) ==
+                       SND_PCM_STREAM_CAPTURE)
+                          ? "capture"
+                          : "playout")
+                  << " error: " << LATE(snd_strerror)(error) << " (" << error
+                  << ") (state " << st << ")";
 
-    // It is recommended to use snd_pcm_recover for all errors. If that function
-    // cannot handle the error, the input error code will be returned, otherwise
-    // 0 is returned. From snd_pcm_recover API doc: "This functions handles
-    // -EINTR (4) (interrupted system call), -EPIPE (32) (playout overrun or
-    // capture underrun) and -ESTRPIPE (86) (stream is suspended) error codes
-    // trying to prepare given stream for next I/O."
+  // It is recommended to use snd_pcm_recover for all errors. If that function
+  // cannot handle the error, the input error code will be returned, otherwise
+  // 0 is returned. From snd_pcm_recover API doc: "This functions handles
+  // -EINTR (4) (interrupted system call), -EPIPE (32) (playout underrun or
+  // capture overrun) and -ESTRPIPE (86) (stream is suspended) error codes
+  // trying to prepare given stream for next I/O."
 
-    /** Open */
-    //    SND_PCM_STATE_OPEN = 0,
-    /** Setup installed */
-    //    SND_PCM_STATE_SETUP,
-    /** Ready to start */
-    //    SND_PCM_STATE_PREPARED,
-    /** Running */
-    //    SND_PCM_STATE_RUNNING,
-    /** Stopped: underrun (playback) or overrun (capture) detected */
-    //    SND_PCM_STATE_XRUN,= 4
-    /** Draining: running (playback) or stopped (capture) */
-    //    SND_PCM_STATE_DRAINING,
-    /** Paused */
-    //    SND_PCM_STATE_PAUSED,
-    /** Hardware is suspended */
-    //    SND_PCM_STATE_SUSPENDED,
-    //  ** Hardware is disconnected */
-    //    SND_PCM_STATE_DISCONNECTED,
-    //    SND_PCM_STATE_LAST = SND_PCM_STATE_DISCONNECTED
+  /** Open */
+  //    SND_PCM_STATE_OPEN = 0,
+  /** Setup installed */
+  //    SND_PCM_STATE_SETUP,
+  /** Ready to start */
+  //    SND_PCM_STATE_PREPARED,
+  /** Running */
+  //    SND_PCM_STATE_RUNNING,
+  /** Stopped: underrun (playback) or overrun (capture) detected */
+  //    SND_PCM_STATE_XRUN,= 4
+  /** Draining: running (playback) or stopped (capture) */
+  //    SND_PCM_STATE_DRAINING,
+  /** Paused */
+  //    SND_PCM_STATE_PAUSED,
+  /** Hardware is suspended */
+  //    SND_PCM_STATE_SUSPENDED,
+  /** Hardware is disconnected */
+  //    SND_PCM_STATE_DISCONNECTED,
+  //    SND_PCM_STATE_LAST = SND_PCM_STATE_DISCONNECTED
 
-    // snd_pcm_recover isn't available in older alsa, e.g. on the FC4 machine
-    // in Sthlm lab.
+  // snd_pcm_recover isn't available in older alsa, e.g. on the FC4 machine
+  // in Sthlm lab.
 
-    int res = LATE(snd_pcm_recover)(deviceHandle, error, 1);
-    if (0 == res)
-    {
-        LOG(LS_VERBOSE) << "Recovery - snd_pcm_recover OK";
+  int res = LATE(snd_pcm_recover)(deviceHandle, error, 1);
+  if (0 == res) {
+    LOG(LS_VERBOSE) << "Recovery - snd_pcm_recover OK";
 
-        if ((error == -EPIPE || error == -ESTRPIPE) && // Buf underrun/overrun.
-            _recording &&
-            LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE)
-        {
-            // For capture streams we also have to repeat the explicit start()
-            // to get data flowing again.
-            int err = LATE(snd_pcm_start)(deviceHandle);
-            if (err != 0)
-            {
-                LOG(LS_ERROR) << "Recovery - snd_pcm_start error: " << err;
-                return -1;
-            }
-        }
-
-        if ((error == -EPIPE || error == -ESTRPIPE) &&  // Buf underrun/overrun.
-            _playing &&
-            LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_PLAYBACK)
-        {
-            // For capture streams we also have to repeat the explicit start() to get
-            // data flowing again.
-            int err = LATE(snd_pcm_start)(deviceHandle);
-            if (err != 0)
-            {
-              LOG(LS_ERROR) << "Recovery - snd_pcm_start error: "
-                            << LATE(snd_strerror)(err);
-              return -1;
-            }
-        }
-
-        return -EPIPE == error ? 1 : 0;
-    }
-    else {
-        LOG(LS_ERROR) << "Unrecoverable alsa stream error: " << res;
+    if ((error == -EPIPE || error == -ESTRPIPE) &&  // Buf underrun/overrun.
+        _recording &&
+        LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE) {
+      // For capture streams we also have to repeat the explicit start()
+      // to get data flowing again.
+      int err = LATE(snd_pcm_start)(deviceHandle);
+      if (err != 0) {
+        LOG(LS_ERROR) << "Recovery - snd_pcm_start error: " << err;
+        return -1;
+      }
     }
 
-    return res;
+    if ((error == -EPIPE || error == -ESTRPIPE) &&  // Buf underrun/overrun.
+        _playing &&
+        LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_PLAYBACK) {
+      // For playback streams we also have to repeat the explicit start() to get
+      // data flowing again.
+      int err = LATE(snd_pcm_start)(deviceHandle);
+      if (err != 0) {
+        LOG(LS_ERROR) << "Recovery - snd_pcm_start error: "
+                      << LATE(snd_strerror)(err);
+        return -1;
+      }
+    }
+
+    return -EPIPE == error ? 1 : 0;
+  } else {
+    LOG(LS_ERROR) << "Unrecoverable alsa stream error: " << res;
+  }
+
+  return res;
 }
 
 // ============================================================================
 //                                  Thread Methods
 // ============================================================================
 
-bool AudioDeviceLinuxALSA::PlayThreadFunc(void* pThis)
-{
-    return (static_cast<AudioDeviceLinuxALSA*>(pThis)->PlayThreadProcess());
+bool AudioDeviceLinuxALSA::PlayThreadFunc(void* pThis) {
+  return (static_cast<AudioDeviceLinuxALSA*>(pThis)->PlayThreadProcess());
 }
 
-bool AudioDeviceLinuxALSA::RecThreadFunc(void* pThis)
-{
-    return (static_cast<AudioDeviceLinuxALSA*>(pThis)->RecThreadProcess());
+bool AudioDeviceLinuxALSA::RecThreadFunc(void* pThis) {
+  return (static_cast<AudioDeviceLinuxALSA*>(pThis)->RecThreadProcess());
 }
 
-bool AudioDeviceLinuxALSA::PlayThreadProcess()
-{
-    if(!_playing)
-        return false;
+bool AudioDeviceLinuxALSA::PlayThreadProcess() {
+  if (!_playing)
+    return false;
 
-    int err;
-    snd_pcm_sframes_t frames;
-    snd_pcm_sframes_t avail_frames;
+  int err;
+  snd_pcm_sframes_t frames;
+  snd_pcm_sframes_t avail_frames;
 
-    Lock();
-    //return a positive number of frames ready otherwise a negative error code
-    avail_frames = LATE(snd_pcm_avail_update)(_handlePlayout);
-    if (avail_frames < 0)
-    {
-        LOG(LS_ERROR) << "playout snd_pcm_avail_update error: "
-                      << LATE(snd_strerror)(avail_frames);
-        ErrorRecovery(avail_frames, _handlePlayout);
-        UnLock();
-        return true;
-    }
-    else if (avail_frames == 0)
-    {
-        UnLock();
-
-        //maximum tixe in milliseconds to wait, a negative value means infinity
-        err = LATE(snd_pcm_wait)(_handlePlayout, 2);
-        if (err == 0)
-        { //timeout occured
-            LOG(LS_VERBOSE) << "playout snd_pcm_wait timeout";
-        }
-
-        return true;
-    }
-
-    if (_playoutFramesLeft <= 0)
-    {
-        UnLock();
-        _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
-        Lock();
-
-        _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
-        assert(_playoutFramesLeft == _playoutFramesIn10MS);
-    }
-
-    if (static_cast<uint32_t>(avail_frames) > _playoutFramesLeft)
-        avail_frames = _playoutFramesLeft;
-
-    int size = LATE(snd_pcm_frames_to_bytes)(_handlePlayout,
-        _playoutFramesLeft);
-    frames = LATE(snd_pcm_writei)(
-        _handlePlayout,
-        &_playoutBuffer[_playoutBufferSizeIn10MS - size],
-        avail_frames);
-
-    if (frames < 0)
-    {
-        LOG(LS_VERBOSE) << "playout snd_pcm_writei error: "
-                        << LATE(snd_strerror)(frames);
-        _playoutFramesLeft = 0;
-        ErrorRecovery(frames, _handlePlayout);
-        UnLock();
-        return true;
-    }
-    else {
-        assert(frames==avail_frames);
-        _playoutFramesLeft -= frames;
-    }
-
+  Lock();
+  // Returns the number of frames ready, or a negative error code.
+  avail_frames = LATE(snd_pcm_avail_update)(_handlePlayout);
+  if (avail_frames < 0) {
+    LOG(LS_ERROR) << "playout snd_pcm_avail_update error: "
+                  << LATE(snd_strerror)(avail_frames);
+    ErrorRecovery(avail_frames, _handlePlayout);
     UnLock();
     return true;
-}
+  } else if (avail_frames == 0) {
+    UnLock();
 
-bool AudioDeviceLinuxALSA::RecThreadProcess()
-{
-    if (!_recording)
-        return false;
+    // maximum time in milliseconds to wait, a negative value means infinity
+    err = LATE(snd_pcm_wait)(_handlePlayout, 2);
+    if (err == 0) {  // timeout occurred
+      LOG(LS_VERBOSE) << "playout snd_pcm_wait timeout";
+    }
 
-    int err;
-    snd_pcm_sframes_t frames;
-    snd_pcm_sframes_t avail_frames;
-    int8_t buffer[_recordingBufferSizeIn10MS];
+    return true;
+  }
 
+  if (_playoutFramesLeft <= 0) {
+    UnLock();
+    _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
     Lock();
 
-    //return a positive number of frames ready otherwise a negative error code
-    avail_frames = LATE(snd_pcm_avail_update)(_handleRecord);
-    if (avail_frames < 0)
-    {
-        LOG(LS_ERROR) << "capture snd_pcm_avail_update error: "
-                      << LATE(snd_strerror)(avail_frames);
-        ErrorRecovery(avail_frames, _handleRecord);
-        UnLock();
-        return true;
-    }
-    else if (avail_frames == 0)
-    { // no frame is available now
-        UnLock();
+    _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
+    assert(_playoutFramesLeft == _playoutFramesIn10MS);
+  }
 
-        //maximum time in milliseconds to wait, a negative value means infinity
-        err = LATE(snd_pcm_wait)(_handleRecord,
-            ALSA_CAPTURE_WAIT_TIMEOUT);
-        if (err == 0) //timeout occured
-            LOG(LS_VERBOSE) << "capture snd_pcm_wait timeout";
+  if (static_cast<uint32_t>(avail_frames) > _playoutFramesLeft)
+    avail_frames = _playoutFramesLeft;
 
-        return true;
-    }
+  int size = LATE(snd_pcm_frames_to_bytes)(_handlePlayout, _playoutFramesLeft);
+  frames = LATE(snd_pcm_writei)(
+      _handlePlayout, &_playoutBuffer[_playoutBufferSizeIn10MS - size],
+      avail_frames);
 
-    if (static_cast<uint32_t>(avail_frames) > _recordingFramesLeft)
-        avail_frames = _recordingFramesLeft;
-
-    frames = LATE(snd_pcm_readi)(_handleRecord,
-        buffer, avail_frames); // frames to be written
-    if (frames < 0)
-    {
-        LOG(LS_ERROR) << "capture snd_pcm_readi error: "
-                      << LATE(snd_strerror)(frames);
-        ErrorRecovery(frames, _handleRecord);
-        UnLock();
-        return true;
-    }
-    else if (frames > 0)
-    {
-        assert(frames == avail_frames);
-
-        int left_size = LATE(snd_pcm_frames_to_bytes)(_handleRecord,
-            _recordingFramesLeft);
-        int size = LATE(snd_pcm_frames_to_bytes)(_handleRecord, frames);
-
-        memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size],
-               buffer, size);
-        _recordingFramesLeft -= frames;
-
-        if (!_recordingFramesLeft)
-        { // buf is full
-            _recordingFramesLeft = _recordingFramesIn10MS;
-
-            // store the recorded buffer (no action will be taken if the
-            // #recorded samples is not a full buffer)
-            _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
-                                               _recordingFramesIn10MS);
-
-            uint32_t currentMicLevel = 0;
-            uint32_t newMicLevel = 0;
-
-            if (AGC())
-            {
-                // store current mic level in the audio buffer if AGC is enabled
-                if (MicrophoneVolume(currentMicLevel) == 0)
-                {
-                    if (currentMicLevel == 0xffffffff)
-                        currentMicLevel = 100;
-                    // this call does not affect the actual microphone volume
-                    _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
-                }
-            }
-
-            // calculate delay
-            _playoutDelay = 0;
-            _recordingDelay = 0;
-            if (_handlePlayout)
-            {
-                err = LATE(snd_pcm_delay)(_handlePlayout,
-                    &_playoutDelay); // returned delay in frames
-                if (err < 0)
-                {
-                    // TODO(xians): Shall we call ErrorRecovery() here?
-                    _playoutDelay = 0;
-                    LOG(LS_ERROR) << "playout snd_pcm_delay: "
-                                  << LATE(snd_strerror)(err);
-                }
-            }
-
-            err = LATE(snd_pcm_delay)(_handleRecord,
-                &_recordingDelay); // returned delay in frames
-            if (err < 0)
-            {
-                // TODO(xians): Shall we call ErrorRecovery() here?
-                _recordingDelay = 0;
-                LOG(LS_ERROR) << "capture snd_pcm_delay: "
-                              << LATE(snd_strerror)(err);
-            }
-
-           // TODO(xians): Shall we add 10ms buffer delay to the record delay?
-            _ptrAudioBuffer->SetVQEData(
-                _playoutDelay * 1000 / _playoutFreq,
-                _recordingDelay * 1000 / _recordingFreq, 0);
-
-            _ptrAudioBuffer->SetTypingStatus(KeyPressed());
-
-            // Deliver recorded samples at specified sample rate, mic level etc.
-            // to the observer using callback.
-            UnLock();
-            _ptrAudioBuffer->DeliverRecordedData();
-            Lock();
-
-            if (AGC())
-            {
-                newMicLevel = _ptrAudioBuffer->NewMicLevel();
-                if (newMicLevel != 0)
-                {
-                    // The VQE will only deliver non-zero microphone levels when a
-                    // change is needed. Set this new mic level (received from the
-                    // observer as return value in the callback).
-                    if (SetMicrophoneVolume(newMicLevel) == -1)
-                        LOG(LS_WARNING)
-                            << "the required modification of the microphone volume failed";
-                }
-            }
-        }
-    }
-
+  if (frames < 0) {
+    LOG(LS_VERBOSE) << "playout snd_pcm_writei error: "
+                    << LATE(snd_strerror)(frames);
+    _playoutFramesLeft = 0;
+    ErrorRecovery(frames, _handlePlayout);
     UnLock();
     return true;
+  } else {
+    assert(frames == avail_frames);
+    _playoutFramesLeft -= frames;
+  }
+
+  UnLock();
+  return true;
 }
 
+bool AudioDeviceLinuxALSA::RecThreadProcess() {
+  if (!_recording)
+    return false;
 
-bool AudioDeviceLinuxALSA::KeyPressed() const{
+  int err;
+  snd_pcm_sframes_t frames;
+  snd_pcm_sframes_t avail_frames;
+  int8_t buffer[_recordingBufferSizeIn10MS];
+
+  Lock();
+
+  // Returns the number of frames ready, or a negative error code.
+  avail_frames = LATE(snd_pcm_avail_update)(_handleRecord);
+  if (avail_frames < 0) {
+    LOG(LS_ERROR) << "capture snd_pcm_avail_update error: "
+                  << LATE(snd_strerror)(avail_frames);
+    ErrorRecovery(avail_frames, _handleRecord);
+    UnLock();
+    return true;
+  } else if (avail_frames == 0) {  // no frame is available now
+    UnLock();
+
+    // maximum time in milliseconds to wait, a negative value means infinity
+    err = LATE(snd_pcm_wait)(_handleRecord, ALSA_CAPTURE_WAIT_TIMEOUT);
+    if (err == 0)  // timeout occurred
+      LOG(LS_VERBOSE) << "capture snd_pcm_wait timeout";
+
+    return true;
+  }
+
+  if (static_cast<uint32_t>(avail_frames) > _recordingFramesLeft)
+    avail_frames = _recordingFramesLeft;
+
+  frames = LATE(snd_pcm_readi)(_handleRecord, buffer,
+                               avail_frames);  // frames to be written
+  if (frames < 0) {
+    LOG(LS_ERROR) << "capture snd_pcm_readi error: "
+                  << LATE(snd_strerror)(frames);
+    ErrorRecovery(frames, _handleRecord);
+    UnLock();
+    return true;
+  } else if (frames > 0) {
+    assert(frames == avail_frames);
+
+    int left_size =
+        LATE(snd_pcm_frames_to_bytes)(_handleRecord, _recordingFramesLeft);
+    int size = LATE(snd_pcm_frames_to_bytes)(_handleRecord, frames);
+
+    memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size], buffer,
+           size);
+    _recordingFramesLeft -= frames;
+
+    if (!_recordingFramesLeft) {  // buf is full
+      _recordingFramesLeft = _recordingFramesIn10MS;
+
+      // store the recorded buffer (no action will be taken if the
+      // #recorded samples is not a full buffer)
+      _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
+                                         _recordingFramesIn10MS);
+
+      uint32_t currentMicLevel = 0;
+      uint32_t newMicLevel = 0;
+
+      if (AGC()) {
+        // store current mic level in the audio buffer if AGC is enabled
+        if (MicrophoneVolume(currentMicLevel) == 0) {
+          if (currentMicLevel == 0xffffffff)
+            currentMicLevel = 100;
+          // this call does not affect the actual microphone volume
+          _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
+        }
+      }
+
+      // calculate delay
+      _playoutDelay = 0;
+      _recordingDelay = 0;
+      if (_handlePlayout) {
+        err = LATE(snd_pcm_delay)(_handlePlayout,
+                                  &_playoutDelay);  // returned delay in frames
+        if (err < 0) {
+          // TODO(xians): Shall we call ErrorRecovery() here?
+          _playoutDelay = 0;
+          LOG(LS_ERROR) << "playout snd_pcm_delay: " << LATE(snd_strerror)(err);
+        }
+      }
+
+      err = LATE(snd_pcm_delay)(_handleRecord,
+                                &_recordingDelay);  // returned delay in frames
+      if (err < 0) {
+        // TODO(xians): Shall we call ErrorRecovery() here?
+        _recordingDelay = 0;
+        LOG(LS_ERROR) << "capture snd_pcm_delay: " << LATE(snd_strerror)(err);
+      }
+
+      // TODO(xians): Shall we add 10ms buffer delay to the record delay?
+      _ptrAudioBuffer->SetVQEData(_playoutDelay * 1000 / _playoutFreq,
+                                  _recordingDelay * 1000 / _recordingFreq, 0);
+
+      _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+
+      // Deliver recorded samples at specified sample rate, mic level etc.
+      // to the observer using callback.
+      UnLock();
+      _ptrAudioBuffer->DeliverRecordedData();
+      Lock();
+
+      if (AGC()) {
+        newMicLevel = _ptrAudioBuffer->NewMicLevel();
+        if (newMicLevel != 0) {
+          // The VQE will only deliver non-zero microphone levels when a
+          // change is needed. Set this new mic level (received from the
+          // observer as return value in the callback).
+          if (SetMicrophoneVolume(newMicLevel) == -1)
+            LOG(LS_WARNING)
+                << "the required modification of the microphone volume failed";
+        }
+      }
+    }
+  }
+
+  UnLock();
+  return true;
+}
+
+bool AudioDeviceLinuxALSA::KeyPressed() const {
 #if defined(USE_X11)
   char szKey[32];
   unsigned int i = 0;
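
Note on the ALSA error handling reformatted above: AudioDeviceLinuxALSA::ErrorRecovery()
relies on snd_pcm_recover() plus an explicit snd_pcm_start() restart for capture streams.
Below is a minimal standalone sketch of that recovery pattern. It assumes a direct link
against libasound rather than the LATE()/AlsaSymbolTable indirection used in this file,
and RecoverFromXrun is an illustrative name only, not part of the WebRTC API.

#include <alsa/asoundlib.h>

#include <cstdio>

// Returns 0 on success, -1 if the error could not be recovered.
int RecoverFromXrun(snd_pcm_t* handle, int error) {
  // snd_pcm_recover() handles -EINTR, -EPIPE (under/overrun) and -ESTRPIPE
  // (suspend) by re-preparing the stream; other errors are returned as-is.
  int res = snd_pcm_recover(handle, error, /*silent=*/1);
  if (res != 0) {
    std::fprintf(stderr, "unrecoverable ALSA error: %s\n", snd_strerror(res));
    return -1;
  }
  // Capture streams need an explicit restart after recovery to get data
  // flowing again, mirroring ErrorRecovery() above.
  if (snd_pcm_stream(handle) == SND_PCM_STREAM_CAPTURE &&
      (error == -EPIPE || error == -ESTRPIPE)) {
    int err = snd_pcm_start(handle);
    if (err != 0) {
      std::fprintf(stderr, "snd_pcm_start failed: %s\n", snd_strerror(err));
      return -1;
    }
  }
  return 0;
}
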
diff --git a/modules/audio_device/linux/audio_device_pulse_linux.cc b/modules/audio_device/linux/audio_device_pulse_linux.cc
index f1eddd6..b9614bf 100644
--- a/modules/audio_device/linux/audio_device_pulse_linux.cc
+++ b/modules/audio_device/linux/audio_device_pulse_linux.cc
@@ -2136,8 +2136,7 @@
               NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
         _writeErrors++;
         if (_writeErrors > 10) {
-          LOG(LS_ERROR) << "Playout error: _writeErrors="
-                        << _writeErrors
+          LOG(LS_ERROR) << "Playout error: _writeErrors=" << _writeErrors
                         << ", error=" << LATE(pa_context_errno)(_paContext);
           _writeErrors = 0;
         }
@@ -2180,8 +2179,7 @@
                                 NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
         _writeErrors++;
         if (_writeErrors > 10) {
-          LOG(LS_ERROR) << "Playout error: _writeErrors="
-                        << _writeErrors
+          LOG(LS_ERROR) << "Playout error: _writeErrors=" << _writeErrors
                         << ", error=" << LATE(pa_context_errno)(_paContext);
           _writeErrors = 0;
         }
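
The audio_mixer_manager_alsa_linux.cc hunks below reformat the usual ALSA simple-mixer
setup sequence (snd_mixer_open, snd_mixer_attach, snd_mixer_selem_register, then element
lookup in LoadSpeakerMixerElement()/LoadMicMixerElement()). A compact sketch of that
sequence follows; it assumes a direct libasound link, and OpenSimpleMixerElem is an
illustrative helper name, not part of this CL.

#include <alsa/asoundlib.h>

// Opens the simple-mixer interface on |card| (e.g. "default" or "hw:0") and
// returns the element named |selem_name| (e.g. "Master" or "Capture"), or
// nullptr on failure. On success the caller owns *out_handle and releases it
// with snd_mixer_close() when the element is no longer needed.
snd_mixer_elem_t* OpenSimpleMixerElem(const char* card,
                                      const char* selem_name,
                                      snd_mixer_t** out_handle) {
  snd_mixer_t* handle = nullptr;
  if (snd_mixer_open(&handle, 0) < 0)
    return nullptr;
  if (snd_mixer_attach(handle, card) < 0 ||
      snd_mixer_selem_register(handle, nullptr, nullptr) < 0 ||
      snd_mixer_load(handle) < 0) {
    snd_mixer_close(handle);
    return nullptr;
  }
  // Look up the named simple element.
  snd_mixer_selem_id_t* sid = nullptr;
  snd_mixer_selem_id_alloca(&sid);
  snd_mixer_selem_id_set_index(sid, 0);
  snd_mixer_selem_id_set_name(sid, selem_name);
  snd_mixer_elem_t* elem = snd_mixer_find_selem(handle, sid);
  if (elem == nullptr) {
    snd_mixer_close(handle);
    return nullptr;
  }
  *out_handle = handle;
  return elem;
}

Volume and mute control then go through snd_mixer_selem_get_playback_volume_range(),
snd_mixer_selem_set_playback_volume_all() and the corresponding capture calls, as in
SpeakerVolume()/SetSpeakerVolume() below.
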
diff --git a/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc b/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc
index 02d9cf9..aabf388 100644
--- a/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc
+++ b/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc
@@ -21,433 +21,368 @@
 #define LATE(sym) \
   LATESYM_GET(webrtc::adm_linux_alsa::AlsaSymbolTable, &AlsaSymbolTable, sym)
 
-namespace webrtc
-{
+namespace webrtc {
 
-AudioMixerManagerLinuxALSA::AudioMixerManagerLinuxALSA() :
-    _outputMixerHandle(NULL),
-    _inputMixerHandle(NULL),
-    _outputMixerElement(NULL),
-    _inputMixerElement(NULL)
-{
-    LOG(LS_INFO) << __FUNCTION__ << " created";
+AudioMixerManagerLinuxALSA::AudioMixerManagerLinuxALSA()
+    : _outputMixerHandle(NULL),
+      _inputMixerHandle(NULL),
+      _outputMixerElement(NULL),
+      _inputMixerElement(NULL) {
+  LOG(LS_INFO) << __FUNCTION__ << " created";
 
-    memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
-    memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
+  memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
+  memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
 }
 
-AudioMixerManagerLinuxALSA::~AudioMixerManagerLinuxALSA()
-{
-    LOG(LS_INFO) << __FUNCTION__ << " destroyed";
-    Close();
+AudioMixerManagerLinuxALSA::~AudioMixerManagerLinuxALSA() {
+  LOG(LS_INFO) << __FUNCTION__ << " destroyed";
+  Close();
 }
 
 // ============================================================================
 //                                    PUBLIC METHODS
 // ============================================================================
 
-int32_t AudioMixerManagerLinuxALSA::Close()
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxALSA::Close() {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    CloseSpeaker();
-    CloseMicrophone();
+  CloseSpeaker();
+  CloseMicrophone();
 
-    return 0;
-
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::CloseSpeaker()
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxALSA::CloseSpeaker() {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    int errVal = 0;
+  int errVal = 0;
 
-    if (_outputMixerHandle != NULL)
-    {
-        LOG(LS_VERBOSE) << "Closing playout mixer";
-        LATE(snd_mixer_free)(_outputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error freeing playout mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error detaching playout mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        errVal = LATE(snd_mixer_close)(_outputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
-                          << errVal;
-        }
-        _outputMixerHandle = NULL;
-        _outputMixerElement = NULL;
+  if (_outputMixerHandle != NULL) {
+    LOG(LS_VERBOSE) << "Closing playout mixer";
+    LATE(snd_mixer_free)(_outputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error freeing playout mixer: "
+                    << LATE(snd_strerror)(errVal);
     }
-    memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
-
-    return 0;
-}
-
-int32_t AudioMixerManagerLinuxALSA::CloseMicrophone()
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
-
-    rtc::CritScope lock(&_critSect);
-
-    int errVal = 0;
-
-    if (_inputMixerHandle != NULL)
-    {
-        LOG(LS_VERBOSE) << "Closing record mixer";
-
-        LATE(snd_mixer_free)(_inputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error freeing record mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        LOG(LS_VERBOSE) << "Closing record mixer 2";
-
-        errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error detaching record mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        LOG(LS_VERBOSE) << "Closing record mixer 3";
-
-        errVal = LATE(snd_mixer_close)(_inputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
-                          << errVal;
-        }
-
-        LOG(LS_VERBOSE) << "Closing record mixer 4";
-        _inputMixerHandle = NULL;
-        _inputMixerElement = NULL;
+    errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error detaching playout mixer: "
+                    << LATE(snd_strerror)(errVal);
     }
-    memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
-
-    return 0;
-}
-
-int32_t AudioMixerManagerLinuxALSA::OpenSpeaker(char* deviceName)
-{
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenSpeaker(name="
-                    << deviceName << ")";
-
-    rtc::CritScope lock(&_critSect);
-
-    int errVal = 0;
-
-    // Close any existing output mixer handle
-    //
-    if (_outputMixerHandle != NULL)
-    {
-        LOG(LS_VERBOSE) << "Closing playout mixer";
-
-        LATE(snd_mixer_free)(_outputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error freeing playout mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error detaching playout mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        errVal = LATE(snd_mixer_close)(_outputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
-                          << errVal;
-        }
+    errVal = LATE(snd_mixer_close)(_outputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal=" << errVal;
     }
     _outputMixerHandle = NULL;
     _outputMixerElement = NULL;
+  }
+  memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
 
-    errVal = LATE(snd_mixer_open)(&_outputMixerHandle, 0);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_mixer_open(&_outputMixerHandle, 0) - error";
-        return -1;
-    }
-
-    char controlName[kAdmMaxDeviceNameSize] = { 0 };
-    GetControlName(controlName, deviceName);
-
-    LOG(LS_VERBOSE) << "snd_mixer_attach(_outputMixerHandle, " << controlName
-                    << ")";
-
-    errVal = LATE(snd_mixer_attach)(_outputMixerHandle, controlName);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_mixer_attach(_outputMixerHandle, " << controlName
-                      << ") error: " << LATE(snd_strerror)(errVal);
-        _outputMixerHandle = NULL;
-        return -1;
-    }
-    strcpy(_outputMixerStr, controlName);
-
-    errVal = LATE(snd_mixer_selem_register)(_outputMixerHandle, NULL, NULL);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR)
-            << "snd_mixer_selem_register(_outputMixerHandle, NULL, NULL), "
-            << "error: " << LATE(snd_strerror)(errVal);
-        _outputMixerHandle = NULL;
-        return -1;
-    }
-
-    // Load and find the proper mixer element
-    if (LoadSpeakerMixerElement() < 0)
-    {
-        return -1;
-    }
-
-    if (_outputMixerHandle != NULL)
-    {
-        LOG(LS_VERBOSE) << "the output mixer device is now open ("
-                        << _outputMixerHandle << ")";
-    }
-
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::OpenMicrophone(char *deviceName)
-{
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenMicrophone(name="
-                    << deviceName << ")";
+int32_t AudioMixerManagerLinuxALSA::CloseMicrophone() {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    int errVal = 0;
+  int errVal = 0;
 
-    // Close any existing input mixer handle
-    //
-    if (_inputMixerHandle != NULL)
-    {
-        LOG(LS_VERBOSE) << "Closing record mixer";
+  if (_inputMixerHandle != NULL) {
+    LOG(LS_VERBOSE) << "Closing record mixer";
 
-        LATE(snd_mixer_free)(_inputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error freeing record mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        LOG(LS_VERBOSE) << "Closing record mixer";
-
-        errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error detaching record mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        LOG(LS_VERBOSE) << "Closing record mixer";
-
-        errVal = LATE(snd_mixer_close)(_inputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
-                          << errVal;
-        }
-        LOG(LS_VERBOSE) << "Closing record mixer";
+    LATE(snd_mixer_free)(_inputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error freeing record mixer: "
+                    << LATE(snd_strerror)(errVal);
     }
+    LOG(LS_VERBOSE) << "Closing record mixer 2";
+
+    errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error detaching record mixer: "
+                    << LATE(snd_strerror)(errVal);
+    }
+    LOG(LS_VERBOSE) << "Closing record mixer 3";
+
+    errVal = LATE(snd_mixer_close)(_inputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal=" << errVal;
+    }
+
+    LOG(LS_VERBOSE) << "Closing record mixer 4";
     _inputMixerHandle = NULL;
     _inputMixerElement = NULL;
+  }
+  memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
 
-    errVal = LATE(snd_mixer_open)(&_inputMixerHandle, 0);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_mixer_open(&_inputMixerHandle, 0) - error";
-        return -1;
-    }
-
-    char controlName[kAdmMaxDeviceNameSize] = { 0 };
-    GetControlName(controlName, deviceName);
-
-    LOG(LS_VERBOSE) << "snd_mixer_attach(_inputMixerHandle, " << controlName
-                    << ")";
-
-    errVal = LATE(snd_mixer_attach)(_inputMixerHandle, controlName);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_mixer_attach(_inputMixerHandle, " << controlName
-                      << ") error: " << LATE(snd_strerror)(errVal);
-
-        _inputMixerHandle = NULL;
-        return -1;
-    }
-    strcpy(_inputMixerStr, controlName);
-
-    errVal = LATE(snd_mixer_selem_register)(_inputMixerHandle, NULL, NULL);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR)
-            << "snd_mixer_selem_register(_inputMixerHandle, NULL, NULL), "
-            << "error: " << LATE(snd_strerror)(errVal);
-
-        _inputMixerHandle = NULL;
-        return -1;
-    }
-    // Load and find the proper mixer element
-    if (LoadMicMixerElement() < 0)
-    {
-        return -1;
-    }
-
-    if (_inputMixerHandle != NULL)
-    {
-        LOG(LS_VERBOSE) << "the input mixer device is now open ("
-                        << _inputMixerHandle << ")";
-    }
-
-    return 0;
+  return 0;
 }
 
-bool AudioMixerManagerLinuxALSA::SpeakerIsInitialized() const
-{
-    LOG(LS_INFO) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxALSA::OpenSpeaker(char* deviceName) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenSpeaker(name="
+                  << deviceName << ")";
 
-    return (_outputMixerHandle != NULL);
+  rtc::CritScope lock(&_critSect);
+
+  int errVal = 0;
+
+  // Close any existing output mixer handle
+  //
+  if (_outputMixerHandle != NULL) {
+    LOG(LS_VERBOSE) << "Closing playout mixer";
+
+    LATE(snd_mixer_free)(_outputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error freeing playout mixer: "
+                    << LATE(snd_strerror)(errVal);
+    }
+    errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error detaching playout mixer: "
+                    << LATE(snd_strerror)(errVal);
+    }
+    errVal = LATE(snd_mixer_close)(_outputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal=" << errVal;
+    }
+  }
+  _outputMixerHandle = NULL;
+  _outputMixerElement = NULL;
+
+  errVal = LATE(snd_mixer_open)(&_outputMixerHandle, 0);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_open(&_outputMixerHandle, 0) - error";
+    return -1;
+  }
+
+  char controlName[kAdmMaxDeviceNameSize] = {0};
+  GetControlName(controlName, deviceName);
+
+  LOG(LS_VERBOSE) << "snd_mixer_attach(_outputMixerHandle, " << controlName
+                  << ")";
+
+  errVal = LATE(snd_mixer_attach)(_outputMixerHandle, controlName);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_attach(_outputMixerHandle, " << controlName
+                  << ") error: " << LATE(snd_strerror)(errVal);
+    _outputMixerHandle = NULL;
+    return -1;
+  }
+  strcpy(_outputMixerStr, controlName);
+
+  errVal = LATE(snd_mixer_selem_register)(_outputMixerHandle, NULL, NULL);
+  if (errVal < 0) {
+    LOG(LS_ERROR)
+        << "snd_mixer_selem_register(_outputMixerHandle, NULL, NULL), "
+        << "error: " << LATE(snd_strerror)(errVal);
+    _outputMixerHandle = NULL;
+    return -1;
+  }
+
+  // Load and find the proper mixer element
+  if (LoadSpeakerMixerElement() < 0) {
+    return -1;
+  }
+
+  if (_outputMixerHandle != NULL) {
+    LOG(LS_VERBOSE) << "the output mixer device is now open ("
+                    << _outputMixerHandle << ")";
+  }
+
+  return 0;
 }
 
-bool AudioMixerManagerLinuxALSA::MicrophoneIsInitialized() const
-{
-    LOG(LS_INFO) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxALSA::OpenMicrophone(char* deviceName) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenMicrophone(name="
+                  << deviceName << ")";
 
-    return (_inputMixerHandle != NULL);
+  rtc::CritScope lock(&_critSect);
+
+  int errVal = 0;
+
+  // Close any existing input mixer handle
+  //
+  if (_inputMixerHandle != NULL) {
+    LOG(LS_VERBOSE) << "Closing record mixer";
+
+    LATE(snd_mixer_free)(_inputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error freeing record mixer: "
+                    << LATE(snd_strerror)(errVal);
+    }
+    LOG(LS_VERBOSE) << "Closing record mixer";
+
+    errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error detaching record mixer: "
+                    << LATE(snd_strerror)(errVal);
+    }
+    LOG(LS_VERBOSE) << "Closing record mixer";
+
+    errVal = LATE(snd_mixer_close)(_inputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal=" << errVal;
+    }
+    LOG(LS_VERBOSE) << "Closing record mixer";
+  }
+  _inputMixerHandle = NULL;
+  _inputMixerElement = NULL;
+
+  errVal = LATE(snd_mixer_open)(&_inputMixerHandle, 0);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_open(&_inputMixerHandle, 0) - error";
+    return -1;
+  }
+
+  char controlName[kAdmMaxDeviceNameSize] = {0};
+  GetControlName(controlName, deviceName);
+
+  LOG(LS_VERBOSE) << "snd_mixer_attach(_inputMixerHandle, " << controlName
+                  << ")";
+
+  errVal = LATE(snd_mixer_attach)(_inputMixerHandle, controlName);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_attach(_inputMixerHandle, " << controlName
+                  << ") error: " << LATE(snd_strerror)(errVal);
+
+    _inputMixerHandle = NULL;
+    return -1;
+  }
+  strcpy(_inputMixerStr, controlName);
+
+  errVal = LATE(snd_mixer_selem_register)(_inputMixerHandle, NULL, NULL);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_selem_register(_inputMixerHandle, NULL, NULL), "
+                  << "error: " << LATE(snd_strerror)(errVal);
+
+    _inputMixerHandle = NULL;
+    return -1;
+  }
+  // Load and find the proper mixer element
+  if (LoadMicMixerElement() < 0) {
+    return -1;
+  }
+
+  if (_inputMixerHandle != NULL) {
+    LOG(LS_VERBOSE) << "the input mixer device is now open ("
+                    << _inputMixerHandle << ")";
+  }
+
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::SetSpeakerVolume(
-    uint32_t volume)
-{
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerVolume(volume="
-                    << volume << ")";
+bool AudioMixerManagerLinuxALSA::SpeakerIsInitialized() const {
+  LOG(LS_INFO) << __FUNCTION__;
 
-    rtc::CritScope lock(&_critSect);
-
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer element exists";
-        return -1;
-    }
-
-    int errVal =
-        LATE(snd_mixer_selem_set_playback_volume_all)(_outputMixerElement,
-                                                      volume);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error changing master volume: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
-
-    return (0);
+  return (_outputMixerHandle != NULL);
 }
 
-int32_t AudioMixerManagerLinuxALSA::SpeakerVolume(
-    uint32_t& volume) const
-{
+bool AudioMixerManagerLinuxALSA::MicrophoneIsInitialized() const {
+  LOG(LS_INFO) << __FUNCTION__;
 
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer element exists";
-        return -1;
-    }
+  return (_inputMixerHandle != NULL);
+}
 
-    long int vol(0);
+int32_t AudioMixerManagerLinuxALSA::SetSpeakerVolume(uint32_t volume) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerVolume(volume="
+                  << volume << ")";
 
-    int
-        errVal = LATE(snd_mixer_selem_get_playback_volume)(
-            _outputMixerElement,
-            (snd_mixer_selem_channel_id_t) 0,
-            &vol);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error getting outputvolume: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SpeakerVolume() => vol="
-                    << vol;
+  rtc::CritScope lock(&_critSect);
 
-    volume = static_cast<uint32_t> (vol);
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer element exists";
+    return -1;
+  }
 
-    return 0;
+  int errVal = LATE(snd_mixer_selem_set_playback_volume_all)(
+      _outputMixerElement, volume);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error changing master volume: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+
+  return (0);
+}
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerVolume(uint32_t& volume) const {
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer element exists";
+    return -1;
+  }
+
+  long int vol(0);
+
+  int errVal = LATE(snd_mixer_selem_get_playback_volume)(
+      _outputMixerElement, (snd_mixer_selem_channel_id_t)0, &vol);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error getting outputvolume: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SpeakerVolume() => vol="
+                  << vol;
+
+  volume = static_cast<uint32_t>(vol);
+
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxALSA::MaxSpeakerVolume(
-    uint32_t& maxVolume) const
-{
+    uint32_t& maxVolume) const {
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avilable output mixer element exists";
+    return -1;
+  }
 
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avilable output mixer element exists";
-        return -1;
-    }
+  long int minVol(0);
+  long int maxVol(0);
 
-    long int minVol(0);
-    long int maxVol(0);
+  int errVal = LATE(snd_mixer_selem_get_playback_volume_range)(
+      _outputMixerElement, &minVol, &maxVol);
 
-    int errVal =
-        LATE(snd_mixer_selem_get_playback_volume_range)(_outputMixerElement,
-                                                        &minVol, &maxVol);
+  LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+                  << ", max: " << maxVol;
 
-    LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
-                    << ", max: " << maxVol;
+  if (maxVol <= minVol) {
+    LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
+                  << LATE(snd_strerror)(errVal);
+  }
 
-    if (maxVol <= minVol)
-    {
-        LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
-                      << LATE(snd_strerror)(errVal);
-    }
+  maxVolume = static_cast<uint32_t>(maxVol);
 
-    maxVolume = static_cast<uint32_t> (maxVol);
-
-    return 0;
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxALSA::MinSpeakerVolume(
-    uint32_t& minVolume) const
-{
+    uint32_t& minVolume) const {
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer element exists";
+    return -1;
+  }
 
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer element exists";
-        return -1;
-    }
+  long int minVol(0);
+  long int maxVol(0);
 
-    long int minVol(0);
-    long int maxVol(0);
+  int errVal = LATE(snd_mixer_selem_get_playback_volume_range)(
+      _outputMixerElement, &minVol, &maxVol);
 
-    int errVal =
-        LATE(snd_mixer_selem_get_playback_volume_range)(_outputMixerElement,
-                                                        &minVol, &maxVol);
+  LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+                  << ", max: " << maxVol;
 
-    LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
-                    << ", max: " << maxVol;
+  if (maxVol <= minVol) {
+    LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
+                  << LATE(snd_strerror)(errVal);
+  }
 
-    if (maxVol <= minVol)
-    {
-        LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
-                      << LATE(snd_strerror)(errVal);
-    }
+  minVolume = static_cast<uint32_t>(minVol);
 
-    minVolume = static_cast<uint32_t> (minVol);
-
-    return 0;
+  return 0;
 }
 
 // TL: Have done testing with these but they don't seem reliable and
@@ -534,239 +469,195 @@
  }
  */
 
-int32_t AudioMixerManagerLinuxALSA::SpeakerVolumeIsAvailable(
-    bool& available)
-{
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer element exists";
-        return -1;
-    }
+int32_t AudioMixerManagerLinuxALSA::SpeakerVolumeIsAvailable(bool& available) {
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer element exists";
+    return -1;
+  }
 
-    available = LATE(snd_mixer_selem_has_playback_volume)(_outputMixerElement);
+  available = LATE(snd_mixer_selem_has_playback_volume)(_outputMixerElement);
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::SpeakerMuteIsAvailable(
-    bool& available)
-{
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer element exists";
-        return -1;
-    }
+int32_t AudioMixerManagerLinuxALSA::SpeakerMuteIsAvailable(bool& available) {
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer element exists";
+    return -1;
+  }
 
-    available = LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
+  available = LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::SetSpeakerMute(bool enable)
-{
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerMute(enable="
-                    << enable << ")";
+int32_t AudioMixerManagerLinuxALSA::SetSpeakerMute(bool enable) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerMute(enable="
+                  << enable << ")";
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer element exists";
-        return -1;
-    }
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer element exists";
+    return -1;
+  }
 
-    // Ensure that the selected speaker destination has a valid mute control.
-    bool available(false);
-    SpeakerMuteIsAvailable(available);
-    if (!available)
-    {
-        LOG(LS_WARNING) << "it is not possible to mute the speaker";
-        return -1;
-    }
+  // Ensure that the selected speaker destination has a valid mute control.
+  bool available(false);
+  SpeakerMuteIsAvailable(available);
+  if (!available) {
+    LOG(LS_WARNING) << "it is not possible to mute the speaker";
+    return -1;
+  }
 
-    // Note value = 0 (off) means muted
-    int errVal =
-        LATE(snd_mixer_selem_set_playback_switch_all)(_outputMixerElement,
-                                                      !enable);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error setting playback switch: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  // Note value = 0 (off) means muted
+  int errVal = LATE(snd_mixer_selem_set_playback_switch_all)(
+      _outputMixerElement, !enable);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error setting playback switch: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
 
-    return (0);
+  return (0);
 }
 
-int32_t AudioMixerManagerLinuxALSA::SpeakerMute(bool& enabled) const
-{
+int32_t AudioMixerManagerLinuxALSA::SpeakerMute(bool& enabled) const {
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer exists";
+    return -1;
+  }
 
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer exists";
-        return -1;
-    }
+  // Ensure that the selected speaker destination has a valid mute control.
+  bool available =
+      LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
+  if (!available) {
+    LOG(LS_WARNING) << "it is not possible to mute the speaker";
+    return -1;
+  }
 
-    // Ensure that the selected speaker destination has a valid mute control.
-    bool available =
-        LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
-    if (!available)
-    {
-        LOG(LS_WARNING) << "it is not possible to mute the speaker";
-        return -1;
-    }
+  int value(false);
 
-    int value(false);
+  // Retrieve one boolean control value for a specified mute-control
+  //
+  int errVal = LATE(snd_mixer_selem_get_playback_switch)(
+      _outputMixerElement, (snd_mixer_selem_channel_id_t)0, &value);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error getting playback switch: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
 
-    // Retrieve one boolean control value for a specified mute-control
-    //
-    int
-        errVal = LATE(snd_mixer_selem_get_playback_switch)(
-            _outputMixerElement,
-            (snd_mixer_selem_channel_id_t) 0,
-            &value);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error getting playback switch: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  // Note value = 0 (off) means muted
+  enabled = (bool)!value;
 
-    // Note value = 0 (off) means muted
-    enabled = (bool) !value;
-
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::MicrophoneMuteIsAvailable(
-    bool& available)
-{
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+int32_t AudioMixerManagerLinuxALSA::MicrophoneMuteIsAvailable(bool& available) {
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
-    return 0;
+  available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::SetMicrophoneMute(bool enable)
-{
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetMicrophoneMute(enable="
-                    << enable << ")";
+int32_t AudioMixerManagerLinuxALSA::SetMicrophoneMute(bool enable) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetMicrophoneMute(enable="
+                  << enable << ")";
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    // Ensure that the selected microphone destination has a valid mute control.
-    bool available(false);
-    MicrophoneMuteIsAvailable(available);
-    if (!available)
-    {
-        LOG(LS_WARNING) << "it is not possible to mute the microphone";
-        return -1;
-    }
+  // Ensure that the selected microphone destination has a valid mute control.
+  bool available(false);
+  MicrophoneMuteIsAvailable(available);
+  if (!available) {
+    LOG(LS_WARNING) << "it is not possible to mute the microphone";
+    return -1;
+  }
 
-    // Note value = 0 (off) means muted
-    int errVal =
-        LATE(snd_mixer_selem_set_capture_switch_all)(_inputMixerElement,
-                                                     !enable);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error setting capture switch: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  // Note value = 0 (off) means muted
+  int errVal =
+      LATE(snd_mixer_selem_set_capture_switch_all)(_inputMixerElement, !enable);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error setting capture switch: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
 
-    return (0);
+  return (0);
 }
 
-int32_t AudioMixerManagerLinuxALSA::MicrophoneMute(bool& enabled) const
-{
+int32_t AudioMixerManagerLinuxALSA::MicrophoneMute(bool& enabled) const {
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer exists";
+    return -1;
+  }
 
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer exists";
-        return -1;
-    }
+  // Ensure that the selected microphone destination has a valid mute control.
+  bool available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
+  if (!available) {
+    LOG(LS_WARNING) << "it is not possible to mute the microphone";
+    return -1;
+  }
 
-    // Ensure that the selected microphone destination has a valid mute control.
-    bool available =
-        LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
-    if (!available)
-    {
-        LOG(LS_WARNING) << "it is not possible to mute the microphone";
-        return -1;
-    }
+  int value(false);
 
-    int value(false);
+  // Retrieve one boolean control value for a specified mute-control
+  //
+  int errVal = LATE(snd_mixer_selem_get_capture_switch)(
+      _inputMixerElement, (snd_mixer_selem_channel_id_t)0, &value);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error getting capture switch: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
 
-    // Retrieve one boolean control value for a specified mute-control
-    //
-    int
-        errVal = LATE(snd_mixer_selem_get_capture_switch)(
-            _inputMixerElement,
-            (snd_mixer_selem_channel_id_t) 0,
-            &value);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error getting capture switch: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  // Note value = 0 (off) means muted
+  enabled = (bool)!value;
 
-    // Note value = 0 (off) means muted
-    enabled = (bool) !value;
-
-    return 0;
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxALSA::MicrophoneVolumeIsAvailable(
-    bool& available)
-{
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+    bool& available) {
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    available = LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement);
+  available = LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement);
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::SetMicrophoneVolume(
-    uint32_t volume)
-{
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetMicrophoneVolume(volume="
-                    << volume << ")";
+int32_t AudioMixerManagerLinuxALSA::SetMicrophoneVolume(uint32_t volume) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetMicrophoneVolume(volume="
+                  << volume << ")";
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    int
-        errVal =
-            LATE(snd_mixer_selem_set_capture_volume_all)(_inputMixerElement,
-                                                         volume);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error changing microphone volume: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  int errVal =
+      LATE(snd_mixer_selem_set_capture_volume_all)(_inputMixerElement, volume);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error changing microphone volume: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
 
-    return (0);
+  return (0);
 }
 
 // TL: Have done testing with these but they don't seem reliable and
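
The mute helpers in the hunk above all go through the LATE() dynamic-symbol wrapper, which can make the underlying ALSA calls hard to see. As a rough standalone sketch of the same simple-mixer query pattern (the card name "default" and element name "Master" below are illustrative assumptions, not values taken from this file), the "value = 0 (off) means muted" convention noted in the comments works like this:

#include <alsa/asoundlib.h>
#include <string.h>

// Returns 1 if the chosen playback element is muted, 0 if unmuted,
// -1 on error or if no mute switch exists (value == 0 means "off"/muted).
int QueryPlaybackMute(const char* card, const char* element) {
  snd_mixer_t* handle = NULL;
  if (snd_mixer_open(&handle, 0) < 0)
    return -1;
  if (snd_mixer_attach(handle, card) < 0 ||
      snd_mixer_selem_register(handle, NULL, NULL) < 0 ||
      snd_mixer_load(handle) < 0) {
    snd_mixer_close(handle);
    return -1;
  }
  int result = -1;
  for (snd_mixer_elem_t* elem = snd_mixer_first_elem(handle); elem;
       elem = snd_mixer_elem_next(elem)) {
    if (!snd_mixer_selem_is_active(elem) ||
        strcmp(snd_mixer_selem_get_name(elem), element) != 0)
      continue;
    if (!snd_mixer_selem_has_playback_switch(elem))
      break;  // This element has no mute control.
    int value = 0;
    if (snd_mixer_selem_get_playback_switch(
            elem, (snd_mixer_selem_channel_id_t)0, &value) < 0)
      break;
    result = value ? 0 : 1;
    break;
  }
  snd_mixer_close(handle);
  return result;  // e.g. QueryPlaybackMute("default", "Master")
}

A set-mute counterpart would call snd_mixer_selem_set_playback_switch_all(elem, !enable), mirroring the playback-switch setter at the top of this hunk.
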
@@ -799,8 +690,8 @@
 
  maxVol = (long int)maxVolume;
  printf("min %d max %d", minVol, maxVol);
- errVal = snd_mixer_selem_set_capture_volume_range(_inputMixerElement, minVol, maxVol);
- LOG(LS_VERBOSE) << "Capture hardware volume range, min: " << minVol
+ errVal = snd_mixer_selem_set_capture_volume_range(_inputMixerElement, minVol,
+ maxVol); LOG(LS_VERBOSE) << "Capture hardware volume range, min: " << minVol
                  << ", max: " << maxVol;
  if (errVal != 0)
  {
@@ -855,263 +746,220 @@
  }
  */
 
-int32_t AudioMixerManagerLinuxALSA::MicrophoneVolume(
-    uint32_t& volume) const
-{
+int32_t AudioMixerManagerLinuxALSA::MicrophoneVolume(uint32_t& volume) const {
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+  long int vol(0);
 
-    long int vol(0);
+  int errVal = LATE(snd_mixer_selem_get_capture_volume)(
+      _inputMixerElement, (snd_mixer_selem_channel_id_t)0, &vol);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error getting inputvolume: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::MicrophoneVolume() => vol="
+                  << vol;
 
-    int
-        errVal =
-            LATE(snd_mixer_selem_get_capture_volume)(
-                _inputMixerElement,
-                (snd_mixer_selem_channel_id_t) 0,
-                &vol);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error getting inputvolume: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::MicrophoneVolume() => vol="
-                    << vol;
+  volume = static_cast<uint32_t>(vol);
 
-    volume = static_cast<uint32_t> (vol);
-
-    return 0;
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxALSA::MaxMicrophoneVolume(
-    uint32_t& maxVolume) const
-{
+    uint32_t& maxVolume) const {
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+  long int minVol(0);
+  long int maxVol(0);
 
-    long int minVol(0);
-    long int maxVol(0);
+  // check if we have mic volume at all
+  if (!LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement)) {
+    LOG(LS_ERROR) << "No microphone volume available";
+    return -1;
+  }
 
-    // check if we have mic volume at all
-    if (!LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement))
-    {
-        LOG(LS_ERROR) << "No microphone volume available";
-        return -1;
-    }
+  int errVal = LATE(snd_mixer_selem_get_capture_volume_range)(
+      _inputMixerElement, &minVol, &maxVol);
 
-    int errVal =
-        LATE(snd_mixer_selem_get_capture_volume_range)(_inputMixerElement,
-                                                       &minVol, &maxVol);
+  LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
+                  << ", max: " << maxVol;
+  if (maxVol <= minVol) {
+    LOG(LS_ERROR) << "Error getting microphone volume range: "
+                  << LATE(snd_strerror)(errVal);
+  }
 
-    LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
-                    << ", max: " << maxVol;
-    if (maxVol <= minVol)
-    {
-        LOG(LS_ERROR) << "Error getting microphone volume range: "
-                      << LATE(snd_strerror)(errVal);
-    }
+  maxVolume = static_cast<uint32_t>(maxVol);
 
-    maxVolume = static_cast<uint32_t> (maxVol);
-
-    return 0;
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxALSA::MinMicrophoneVolume(
-    uint32_t& minVolume) const
-{
+    uint32_t& minVolume) const {
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+  long int minVol(0);
+  long int maxVol(0);
 
-    long int minVol(0);
-    long int maxVol(0);
+  int errVal = LATE(snd_mixer_selem_get_capture_volume_range)(
+      _inputMixerElement, &minVol, &maxVol);
 
-    int errVal =
-        LATE(snd_mixer_selem_get_capture_volume_range)(_inputMixerElement,
-                                                       &minVol, &maxVol);
+  LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
+                  << ", max: " << maxVol;
+  if (maxVol <= minVol) {
+    LOG(LS_ERROR) << "Error getting microphone volume range: "
+                  << LATE(snd_strerror)(errVal);
+  }
 
-    LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
-                    << ", max: " << maxVol;
-    if (maxVol <= minVol)
-    {
-        LOG(LS_ERROR) << "Error getting microphone volume range: "
-                      << LATE(snd_strerror)(errVal);
-    }
+  minVolume = static_cast<uint32_t>(minVol);
 
-    minVolume = static_cast<uint32_t> (minVol);
-
-    return 0;
+  return 0;
 }
 
 // ============================================================================
 //                                 Private Methods
 // ============================================================================
 
-int32_t AudioMixerManagerLinuxALSA::LoadMicMixerElement() const
-{
-    int errVal = LATE(snd_mixer_load)(_inputMixerHandle);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_mixer_load(_inputMixerHandle), error: "
-                      << LATE(snd_strerror)(errVal);
-        _inputMixerHandle = NULL;
-        return -1;
+int32_t AudioMixerManagerLinuxALSA::LoadMicMixerElement() const {
+  int errVal = LATE(snd_mixer_load)(_inputMixerHandle);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_load(_inputMixerHandle), error: "
+                  << LATE(snd_strerror)(errVal);
+    _inputMixerHandle = NULL;
+    return -1;
+  }
+
+  snd_mixer_elem_t* elem = NULL;
+  snd_mixer_elem_t* micElem = NULL;
+  unsigned mixerIdx = 0;
+  const char* selemName = NULL;
+
+  // Find and store handles to the right mixer elements
+  for (elem = LATE(snd_mixer_first_elem)(_inputMixerHandle); elem;
+       elem = LATE(snd_mixer_elem_next)(elem), mixerIdx++) {
+    if (LATE(snd_mixer_selem_is_active)(elem)) {
+      selemName = LATE(snd_mixer_selem_get_name)(elem);
+      if (strcmp(selemName, "Capture") == 0)  // "Capture", "Mic"
+      {
+        _inputMixerElement = elem;
+        LOG(LS_VERBOSE) << "Capture element set";
+      } else if (strcmp(selemName, "Mic") == 0) {
+        micElem = elem;
+        LOG(LS_VERBOSE) << "Mic element found";
+      }
     }
 
-    snd_mixer_elem_t *elem = NULL;
-    snd_mixer_elem_t *micElem = NULL;
-    unsigned mixerIdx = 0;
-    const char *selemName = NULL;
-
-    // Find and store handles to the right mixer elements
-    for (elem = LATE(snd_mixer_first_elem)(_inputMixerHandle); elem; elem
-        = LATE(snd_mixer_elem_next)(elem), mixerIdx++)
-    {
-        if (LATE(snd_mixer_selem_is_active)(elem))
-        {
-            selemName = LATE(snd_mixer_selem_get_name)(elem);
-            if (strcmp(selemName, "Capture") == 0) // "Capture", "Mic"
-            {
-                _inputMixerElement = elem;
-                LOG(LS_VERBOSE) << "Capture element set";
-            } else if (strcmp(selemName, "Mic") == 0)
-            {
-                micElem = elem;
-                LOG(LS_VERBOSE) << "Mic element found";
-            }
-        }
-
-        if (_inputMixerElement)
-        {
-            // Use the first Capture element that is found
-            // The second one may not work
-            break;
-        }
+    if (_inputMixerElement) {
+      // Use the first Capture element that is found
+      // The second one may not work
+      break;
     }
+  }
 
-    if (_inputMixerElement == NULL)
-    {
-        // We didn't find a Capture handle, use Mic.
-        if (micElem != NULL)
-        {
-            _inputMixerElement = micElem;
-            LOG(LS_VERBOSE) << "Using Mic as capture volume.";
-        } else
-        {
-            _inputMixerElement = NULL;
-            LOG(LS_ERROR) << "Could not find capture volume on the mixer.";
+  if (_inputMixerElement == NULL) {
+    // We didn't find a Capture handle, use Mic.
+    if (micElem != NULL) {
+      _inputMixerElement = micElem;
+      LOG(LS_VERBOSE) << "Using Mic as capture volume.";
+    } else {
+      _inputMixerElement = NULL;
+      LOG(LS_ERROR) << "Could not find capture volume on the mixer.";
 
-            return -1;
-        }
+      return -1;
     }
+  }
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::LoadSpeakerMixerElement() const
-{
-    int errVal = LATE(snd_mixer_load)(_outputMixerHandle);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_mixer_load(_outputMixerHandle), error: "
-                      << LATE(snd_strerror)(errVal);
-        _outputMixerHandle = NULL;
-        return -1;
+int32_t AudioMixerManagerLinuxALSA::LoadSpeakerMixerElement() const {
+  int errVal = LATE(snd_mixer_load)(_outputMixerHandle);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_load(_outputMixerHandle), error: "
+                  << LATE(snd_strerror)(errVal);
+    _outputMixerHandle = NULL;
+    return -1;
+  }
+
+  snd_mixer_elem_t* elem = NULL;
+  snd_mixer_elem_t* masterElem = NULL;
+  snd_mixer_elem_t* speakerElem = NULL;
+  unsigned mixerIdx = 0;
+  const char* selemName = NULL;
+
+  // Find and store handles to the right mixer elements
+  for (elem = LATE(snd_mixer_first_elem)(_outputMixerHandle); elem;
+       elem = LATE(snd_mixer_elem_next)(elem), mixerIdx++) {
+    if (LATE(snd_mixer_selem_is_active)(elem)) {
+      selemName = LATE(snd_mixer_selem_get_name)(elem);
+      LOG(LS_VERBOSE) << "snd_mixer_selem_get_name " << mixerIdx << ": "
+                      << selemName << " =" << elem;
+
+      // "Master", "PCM", "Wave", "Master Mono", "PC Speaker", "PCM", "Wave"
+      if (strcmp(selemName, "PCM") == 0) {
+        _outputMixerElement = elem;
+        LOG(LS_VERBOSE) << "PCM element set";
+      } else if (strcmp(selemName, "Master") == 0) {
+        masterElem = elem;
+        LOG(LS_VERBOSE) << "Master element found";
+      } else if (strcmp(selemName, "Speaker") == 0) {
+        speakerElem = elem;
+        LOG(LS_VERBOSE) << "Speaker element found";
+      }
     }
 
-    snd_mixer_elem_t *elem = NULL;
-    snd_mixer_elem_t *masterElem = NULL;
-    snd_mixer_elem_t *speakerElem = NULL;
-    unsigned mixerIdx = 0;
-    const char *selemName = NULL;
-
-    // Find and store handles to the right mixer elements
-    for (elem = LATE(snd_mixer_first_elem)(_outputMixerHandle); elem; elem
-        = LATE(snd_mixer_elem_next)(elem), mixerIdx++)
-    {
-        if (LATE(snd_mixer_selem_is_active)(elem))
-        {
-            selemName = LATE(snd_mixer_selem_get_name)(elem);
-            LOG(LS_VERBOSE) << "snd_mixer_selem_get_name " << mixerIdx << ": "
-                            << selemName << " =" << elem;
-
-            // "Master", "PCM", "Wave", "Master Mono", "PC Speaker", "PCM", "Wave"
-            if (strcmp(selemName, "PCM") == 0)
-            {
-                _outputMixerElement = elem;
-                LOG(LS_VERBOSE) << "PCM element set";
-            } else if (strcmp(selemName, "Master") == 0)
-            {
-                masterElem = elem;
-                LOG(LS_VERBOSE) << "Master element found";
-            } else if (strcmp(selemName, "Speaker") == 0)
-            {
-                speakerElem = elem;
-                LOG(LS_VERBOSE) << "Speaker element found";
-            }
-        }
-
-        if (_outputMixerElement)
-        {
-            // We have found the element we want
-            break;
-        }
+    if (_outputMixerElement) {
+      // We have found the element we want
+      break;
     }
+  }
 
-    // If we didn't find a PCM Handle, use Master or Speaker
-    if (_outputMixerElement == NULL)
-    {
-        if (masterElem != NULL)
-        {
-            _outputMixerElement = masterElem;
-            LOG(LS_VERBOSE) << "Using Master as output volume.";
-        } else if (speakerElem != NULL)
-        {
-            _outputMixerElement = speakerElem;
-            LOG(LS_VERBOSE) << "Using Speaker as output volume.";
-        } else
-        {
-            _outputMixerElement = NULL;
-            LOG(LS_ERROR) << "Could not find output volume in the mixer.";
-            return -1;
-        }
+  // If we didn't find a PCM Handle, use Master or Speaker
+  if (_outputMixerElement == NULL) {
+    if (masterElem != NULL) {
+      _outputMixerElement = masterElem;
+      LOG(LS_VERBOSE) << "Using Master as output volume.";
+    } else if (speakerElem != NULL) {
+      _outputMixerElement = speakerElem;
+      LOG(LS_VERBOSE) << "Using Speaker as output volume.";
+    } else {
+      _outputMixerElement = NULL;
+      LOG(LS_ERROR) << "Could not find output volume in the mixer.";
+      return -1;
     }
+  }
 
-    return 0;
+  return 0;
 }
 
 void AudioMixerManagerLinuxALSA::GetControlName(char* controlName,
-                                                char* deviceName) const
-{
-    // Example
-    // deviceName: "front:CARD=Intel,DEV=0"
-    // controlName: "hw:CARD=Intel"
-    char* pos1 = strchr(deviceName, ':');
-    char* pos2 = strchr(deviceName, ',');
-    if (!pos2) {
-        // Can also be default:CARD=Intel
-        pos2 = &deviceName[strlen(deviceName)];
-    }
-    if (pos1 && pos2) {
-        strcpy(controlName, "hw");
-        int nChar = (int) (pos2 - pos1);
-        strncpy(&controlName[2], pos1, nChar);
-        controlName[2 + nChar] = '\0';
-    } else {
-        strcpy(controlName, deviceName);
-    }
-
+                                                char* deviceName) const {
+  // Example
+  // deviceName: "front:CARD=Intel,DEV=0"
+  // controlName: "hw:CARD=Intel"
+  char* pos1 = strchr(deviceName, ':');
+  char* pos2 = strchr(deviceName, ',');
+  if (!pos2) {
+    // Can also be default:CARD=Intel
+    pos2 = &deviceName[strlen(deviceName)];
+  }
+  if (pos1 && pos2) {
+    strcpy(controlName, "hw");
+    int nChar = (int)(pos2 - pos1);
+    strncpy(&controlName[2], pos1, nChar);
+    controlName[2 + nChar] = '\0';
+  } else {
+    strcpy(controlName, deviceName);
+  }
 }
 
-}
+}  // namespace webrtc
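
GetControlName() above maps an ALSA PCM device string to the control device of the same card, e.g. "front:CARD=Intel,DEV=0" to "hw:CARD=Intel". A self-contained restatement of that mapping follows; the helper name DeviceToControlName and the main() driver are hypothetical additions made only so the example in the comment can be run:

#include <cstdio>
#include <cstring>

// Same string transformation as GetControlName() above, renamed for this sketch.
static void DeviceToControlName(char* controlName, const char* deviceName) {
  const char* pos1 = strchr(deviceName, ':');
  const char* pos2 = strchr(deviceName, ',');
  if (!pos2) {
    // Can also be "default:CARD=Intel" (no ",DEV=..." suffix).
    pos2 = deviceName + strlen(deviceName);
  }
  if (pos1) {
    strcpy(controlName, "hw");
    int nChar = (int)(pos2 - pos1);         // length of ":CARD=..."
    strncpy(controlName + 2, pos1, nChar);  // append ":CARD=..."
    controlName[2 + nChar] = '\0';
  } else {
    strcpy(controlName, deviceName);        // no card qualifier at all
  }
}

int main() {
  char control[64];
  DeviceToControlName(control, "front:CARD=Intel,DEV=0");
  printf("%s\n", control);
  DeviceToControlName(control, "default:CARD=Intel");
  printf("%s\n", control);
  return 0;
}

Both calls print hw:CARD=Intel, matching the example given in the function's comment.
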
diff --git a/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc b/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
index 21f7fd0..80896c9 100644
--- a/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
+++ b/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
@@ -23,8 +23,7 @@
   LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, \
               sym)
 
-namespace webrtc
-{
+namespace webrtc {
 
 class AutoPulseLock {
  public:
@@ -33,38 +32,34 @@
     LATE(pa_threaded_mainloop_lock)(pa_mainloop_);
   }
 
-  ~AutoPulseLock() {
-    LATE(pa_threaded_mainloop_unlock)(pa_mainloop_);
-  }
+  ~AutoPulseLock() { LATE(pa_threaded_mainloop_unlock)(pa_mainloop_); }
 
  private:
   pa_threaded_mainloop* const pa_mainloop_;
 };
 
-AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse() :
-    _paOutputDeviceIndex(-1),
-    _paInputDeviceIndex(-1),
-    _paPlayStream(NULL),
-    _paRecStream(NULL),
-    _paMainloop(NULL),
-    _paContext(NULL),
-    _paVolume(0),
-    _paMute(0),
-    _paVolSteps(0),
-    _paSpeakerMute(false),
-    _paSpeakerVolume(PA_VOLUME_NORM),
-    _paChannels(0),
-    _paObjectsSet(false)
-{
-    LOG(LS_INFO) << __FUNCTION__ << " created";
+AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse()
+    : _paOutputDeviceIndex(-1),
+      _paInputDeviceIndex(-1),
+      _paPlayStream(NULL),
+      _paRecStream(NULL),
+      _paMainloop(NULL),
+      _paContext(NULL),
+      _paVolume(0),
+      _paMute(0),
+      _paVolSteps(0),
+      _paSpeakerMute(false),
+      _paSpeakerVolume(PA_VOLUME_NORM),
+      _paChannels(0),
+      _paObjectsSet(false) {
+  LOG(LS_INFO) << __FUNCTION__ << " created";
 }
 
-AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse()
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_INFO) << __FUNCTION__ << " destroyed";
+AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_INFO) << __FUNCTION__ << " destroyed";
 
-    Close();
+  Close();
 }
 
 // ===========================================================================
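
Several of the getters in the hunk below hand a pa_operation to WaitForOperationCompletion(). That helper is defined elsewhere in this file and is not part of this diff, so the following is only a hedged sketch of the usual PulseAudio idiom it stands for: a wait loop on the locked threaded mainloop.

#include <pulse/pulseaudio.h>

// Assumed shape of such a helper (not the actual WebRTC implementation).
// The threaded mainloop must already be locked by the caller, and the
// operation's completion callback must call pa_threaded_mainloop_signal()
// so the wait below wakes up.
static void WaitForPaOperation(pa_threaded_mainloop* mainloop,
                               pa_operation* op) {
  if (!op)
    return;
  while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
    pa_threaded_mainloop_wait(mainloop);
  pa_operation_unref(op);
}

This pattern assumes the completion callbacks (such as PaSourceInfoCallback used below) signal the mainloop from its own thread; pa_threaded_mainloop_wait() releases the lock while blocked and re-takes it on wake-up, which lets the surrounding code hold AutoPulseLock across the whole query.
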
@@ -73,866 +68,734 @@
 
 int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects(
     pa_threaded_mainloop* mainloop,
-    pa_context* context)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << __FUNCTION__;
+    pa_context* context) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    if (!mainloop || !context)
-    {
-        LOG(LS_ERROR) << "could not set PulseAudio objects for mixer";
-        return -1;
-    }
+  if (!mainloop || !context) {
+    LOG(LS_ERROR) << "could not set PulseAudio objects for mixer";
+    return -1;
+  }
 
-    _paMainloop = mainloop;
-    _paContext = context;
-    _paObjectsSet = true;
+  _paMainloop = mainloop;
+  _paContext = context;
+  _paObjectsSet = true;
 
-    LOG(LS_VERBOSE) << "the PulseAudio objects for the mixer has been set";
+  LOG(LS_VERBOSE) << "the PulseAudio objects for the mixer has been set";
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::Close()
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxPulse::Close() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    CloseSpeaker();
-    CloseMicrophone();
+  CloseSpeaker();
+  CloseMicrophone();
 
-    _paMainloop = NULL;
-    _paContext = NULL;
-    _paObjectsSet = false;
+  _paMainloop = NULL;
+  _paContext = NULL;
+  _paObjectsSet = false;
 
-    return 0;
-
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::CloseSpeaker()
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxPulse::CloseSpeaker() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    // Reset the index to -1
-    _paOutputDeviceIndex = -1;
-    _paPlayStream = NULL;
+  // Reset the index to -1
+  _paOutputDeviceIndex = -1;
+  _paPlayStream = NULL;
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::CloseMicrophone()
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxPulse::CloseMicrophone() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    // Reset the index to -1
-    _paInputDeviceIndex = -1;
-    _paRecStream = NULL;
+  // Reset the index to -1
+  _paInputDeviceIndex = -1;
+  _paRecStream = NULL;
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetPlayStream(playStream)";
+int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetPlayStream(playStream)";
 
-    _paPlayStream = playStream;
-    return 0;
+  _paPlayStream = playStream;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetRecStream(recStream)";
+int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetRecStream(recStream)";
 
-    _paRecStream = recStream;
-    return 0;
+  _paRecStream = recStream;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::OpenSpeaker(
-    uint16_t deviceIndex)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex="
-                    << deviceIndex << ")";
+int32_t AudioMixerManagerLinuxPulse::OpenSpeaker(uint16_t deviceIndex) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex="
+                  << deviceIndex << ")";
 
-    // No point in opening the speaker
-    // if PA objects have not been set
-    if (!_paObjectsSet)
-    {
-        LOG(LS_ERROR) << "PulseAudio objects has not been set";
-        return -1;
-    }
+  // No point in opening the speaker
+  // if PA objects have not been set
+  if (!_paObjectsSet) {
+    LOG(LS_ERROR) << "PulseAudio objects has not been set";
+    return -1;
+  }
 
-    // Set the index for the PulseAudio
-    // output device to control
-    _paOutputDeviceIndex = deviceIndex;
+  // Set the index for the PulseAudio
+  // output device to control
+  _paOutputDeviceIndex = deviceIndex;
 
-    LOG(LS_VERBOSE) << "the output mixer device is now open";
+  LOG(LS_VERBOSE) << "the output mixer device is now open";
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(
-    uint16_t deviceIndex)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE)
-        << "AudioMixerManagerLinuxPulse::OpenMicrophone(deviceIndex="
-        << deviceIndex << ")";
+int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(uint16_t deviceIndex) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::OpenMicrophone(deviceIndex="
+                  << deviceIndex << ")";
 
-    // No point in opening the microphone
-    // if PA objects have not been set
-    if (!_paObjectsSet)
-    {
-        LOG(LS_ERROR) << "PulseAudio objects have not been set";
-        return -1;
-    }
+  // No point in opening the microphone
+  // if PA objects have not been set
+  if (!_paObjectsSet) {
+    LOG(LS_ERROR) << "PulseAudio objects have not been set";
+    return -1;
+  }
 
-    // Set the index for the PulseAudio
-    // input device to control
-    _paInputDeviceIndex = deviceIndex;
+  // Set the index for the PulseAudio
+  // input device to control
+  _paInputDeviceIndex = deviceIndex;
 
-    LOG(LS_VERBOSE) << "the input mixer device is now open";
+  LOG(LS_VERBOSE) << "the input mixer device is now open";
 
-    return 0;
+  return 0;
 }
 
-bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_INFO) << __FUNCTION__;
+bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_INFO) << __FUNCTION__;
 
-    return (_paOutputDeviceIndex != -1);
+  return (_paOutputDeviceIndex != -1);
 }
 
-bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_INFO) << __FUNCTION__;
+bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_INFO) << __FUNCTION__;
 
-    return (_paInputDeviceIndex != -1);
+  return (_paInputDeviceIndex != -1);
 }
 
-int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(
-    uint32_t volume)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume="
-                    << volume << ")";
+int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(uint32_t volume) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume="
+                  << volume << ")";
 
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  bool setFailed(false);
+
+  if (_paPlayStream &&
+      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+    // We can only really set the volume if we have a connected stream
+    AutoPulseLock auto_lock(_paMainloop);
+
+    // Get the number of channels from the sample specification
+    const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_paPlayStream);
+    if (!spec) {
+      LOG(LS_ERROR) << "could not get sample specification";
+      return -1;
     }
 
-    bool setFailed(false);
+    // Set the same volume for all channels
+    pa_cvolume cVolumes;
+    LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
 
-    if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        // We can only really set the volume if we have a connected stream
-        AutoPulseLock auto_lock(_paMainloop);
-
-        // Get the number of channels from the sample specification
-        const pa_sample_spec *spec =
-            LATE(pa_stream_get_sample_spec)(_paPlayStream);
-        if (!spec)
-        {
-            LOG(LS_ERROR) << "could not get sample specification";
-            return -1;
-        }
-
-        // Set the same volume for all channels
-        pa_cvolume cVolumes;
-        LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
-
-        pa_operation* paOperation = NULL;
-        paOperation = LATE(pa_context_set_sink_input_volume)(
-            _paContext,
-            LATE(pa_stream_get_index)(_paPlayStream),
-            &cVolumes,
-            PaSetVolumeCallback, NULL);
-        if (!paOperation)
-        {
-            setFailed = true;
-        }
-
-        // Don't need to wait for the completion
-        LATE(pa_operation_unref)(paOperation);
-    } else
-    {
-        // We have not created a stream or it's not connected to the sink
-        // Save the volume to be set at connection
-        _paSpeakerVolume = volume;
+    pa_operation* paOperation = NULL;
+    paOperation = LATE(pa_context_set_sink_input_volume)(
+        _paContext, LATE(pa_stream_get_index)(_paPlayStream), &cVolumes,
+        PaSetVolumeCallback, NULL);
+    if (!paOperation) {
+      setFailed = true;
     }
 
-    if (setFailed)
-    {
-        LOG(LS_WARNING) << "could not set speaker volume, error="
-                        << LATE(pa_context_errno)(_paContext);
+    // Don't need to wait for the completion
+    LATE(pa_operation_unref)(paOperation);
+  } else {
+    // We have not created a stream or it's not connected to the sink
+    // Save the volume to be set at connection
+    _paSpeakerVolume = volume;
+  }
 
-        return -1;
-    }
+  if (setFailed) {
+    LOG(LS_WARNING) << "could not set speaker volume, error="
+                    << LATE(pa_context_errno)(_paContext);
 
-    return 0;
+    return -1;
+  }
+
+  return 0;
 }
 
-int32_t
-AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const
-{
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
+int32_t AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const {
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
 
-    if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        // We can only get the volume if we have a connected stream
-        if (!GetSinkInputInfo())
-          return -1;
-
-        AutoPulseLock auto_lock(_paMainloop);
-        volume = static_cast<uint32_t> (_paVolume);
-    } else
-    {
-        AutoPulseLock auto_lock(_paMainloop);
-        volume = _paSpeakerVolume;
-    }
-
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SpeakerVolume() => vol="
-                    << volume;
-
-    return 0;
-}
-
-int32_t
-AudioMixerManagerLinuxPulse::MaxSpeakerVolume(uint32_t& maxVolume) const
-{
-
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    // PA_VOLUME_NORM corresponds to 100% (0db)
-    // but PA allows up to 150 db amplification
-    maxVolume = static_cast<uint32_t> (PA_VOLUME_NORM);
-
-    return 0;
-}
-
-int32_t
-AudioMixerManagerLinuxPulse::MinSpeakerVolume(uint32_t& minVolume) const
-{
-
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    minVolume = static_cast<uint32_t> (PA_VOLUME_MUTED);
-
-    return 0;
-}
-
-int32_t
-AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    // Always available in Pulse Audio
-    available = true;
-
-    return 0;
-}
-
-int32_t
-AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    // Always available in Pulse Audio
-    available = true;
-
-    return 0;
-}
-
-int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerMute(enable="
-                    << enable << ")";
-
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    bool setFailed(false);
-
-    if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        // We can only really mute if we have a connected stream
-        AutoPulseLock auto_lock(_paMainloop);
-
-        pa_operation* paOperation = NULL;
-        paOperation = LATE(pa_context_set_sink_input_mute)(
-            _paContext,
-            LATE(pa_stream_get_index)(_paPlayStream),
-            (int) enable,
-            PaSetVolumeCallback,
-            NULL);
-        if (!paOperation)
-        {
-            setFailed = true;
-        }
-
-        // Don't need to wait for the completion
-        LATE(pa_operation_unref)(paOperation);
-    } else
-    {
-        // We have not created a stream or it's not connected to the sink
-        // Save the mute status to be set at connection
-        _paSpeakerMute = enable;
-    }
-
-    if (setFailed)
-    {
-        LOG(LS_WARNING) << "could not mute speaker, error="
-                        << LATE(pa_context_errno)(_paContext);
-        return -1;
-    }
-
-    return 0;
-}
-
-int32_t AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const
-{
-
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        // We can only get the mute status if we have a connected stream
-        if (!GetSinkInputInfo())
-          return -1;
-
-        enabled = static_cast<bool> (_paMute);
-    } else
-    {
-        enabled = _paSpeakerMute;
-    }
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SpeakerMute() => enabled="
-                    << enabled;
-
-    return 0;
-}
-
-int32_t
-AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    uint32_t deviceIndex = (uint32_t) _paOutputDeviceIndex;
-
-    {
-        AutoPulseLock auto_lock(_paMainloop);
-
-        // Get the actual stream device index if we have a connected stream
-        // The device used by the stream can be changed
-        // during the call
-        if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream)
-            != PA_STREAM_UNCONNECTED))
-        {
-            deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream);
-        }
-    }
-
-    if (!GetSinkInfoByIndex(deviceIndex))
+  if (_paPlayStream &&
+      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+    // We can only get the volume if we have a connected stream
+    if (!GetSinkInputInfo())
       return -1;
 
-    available = static_cast<bool> (_paChannels == 2);
+    AutoPulseLock auto_lock(_paMainloop);
+    volume = static_cast<uint32_t>(_paVolume);
+  } else {
+    AutoPulseLock auto_lock(_paMainloop);
+    volume = _paSpeakerVolume;
+  }
 
-    return 0;
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SpeakerVolume() => vol="
+                  << volume;
+
+  return 0;
 }
 
-int32_t
-AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
+int32_t AudioMixerManagerLinuxPulse::MaxSpeakerVolume(
+    uint32_t& maxVolume) const {
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  // PA_VOLUME_NORM corresponds to 100% (0db)
+  // but PA allows up to 150 db amplification
+  maxVolume = static_cast<uint32_t>(PA_VOLUME_NORM);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MinSpeakerVolume(
+    uint32_t& minVolume) const {
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  minVolume = static_cast<uint32_t>(PA_VOLUME_MUTED);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  // Always available in Pulse Audio
+  available = true;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  // Always available in Pulse Audio
+  available = true;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerMute(enable="
+                  << enable << ")";
+
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  bool setFailed(false);
+
+  if (_paPlayStream &&
+      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+    // We can only really mute if we have a connected stream
+    AutoPulseLock auto_lock(_paMainloop);
+
+    pa_operation* paOperation = NULL;
+    paOperation = LATE(pa_context_set_sink_input_mute)(
+        _paContext, LATE(pa_stream_get_index)(_paPlayStream), (int)enable,
+        PaSetVolumeCallback, NULL);
+    if (!paOperation) {
+      setFailed = true;
     }
 
-    uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
+    // Don't need to wait for the completion
+    LATE(pa_operation_unref)(paOperation);
+  } else {
+    // We have not created a stream or it's not connected to the sink
+    // Save the mute status to be set at connection
+    _paSpeakerMute = enable;
+  }
 
+  if (setFailed) {
+    LOG(LS_WARNING) << "could not mute speaker, error="
+                    << LATE(pa_context_errno)(_paContext);
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const {
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  if (_paPlayStream &&
+      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+    // We can only get the mute status if we have a connected stream
+    if (!GetSinkInputInfo())
+      return -1;
+
+    enabled = static_cast<bool>(_paMute);
+  } else {
+    enabled = _paSpeakerMute;
+  }
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SpeakerMute() => enabled="
+                  << enabled;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  uint32_t deviceIndex = (uint32_t)_paOutputDeviceIndex;
+
+  {
     AutoPulseLock auto_lock(_paMainloop);
 
     // Get the actual stream device index if we have a connected stream
     // The device used by the stream can be changed
     // during the call
-    if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+    if (_paPlayStream &&
+        (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+      deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream);
     }
+  }
 
-    pa_operation* paOperation = NULL;
+  if (!GetSinkInfoByIndex(deviceIndex))
+    return -1;
 
-    // Get info for this source
-    // We want to know if the actual device can record in stereo
-    paOperation = LATE(pa_context_get_source_info_by_index)(
-        _paContext, deviceIndex,
-        PaSourceInfoCallback,
-        (void*) this);
+  available = static_cast<bool>(_paChannels == 2);
 
-    WaitForOperationCompletion(paOperation);
+  return 0;
+}
 
-    available = static_cast<bool> (_paChannels == 2);
+int32_t AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(
+    bool& available) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    LOG(LS_VERBOSE)
-        << "AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()"
-        << " => available=" << available;
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
 
-    return 0;
+  AutoPulseLock auto_lock(_paMainloop);
+
+  // Get the actual stream device index if we have a connected stream
+  // The device used by the stream can be changed
+  // during the call
+  if (_paRecStream &&
+      (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+    deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+  }
+
+  pa_operation* paOperation = NULL;
+
+  // Get info for this source
+  // We want to know if the actual device can record in stereo
+  paOperation = LATE(pa_context_get_source_info_by_index)(
+      _paContext, deviceIndex, PaSourceInfoCallback, (void*)this);
+
+  WaitForOperationCompletion(paOperation);
+
+  available = static_cast<bool>(_paChannels == 2);
+
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()"
+                  << " => available=" << available;
+
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable(
-    bool& available)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
+    bool& available) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    // Always available in Pulse Audio
-    available = true;
+  // Always available in Pulse Audio
+  available = true;
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable="
-                    << enable << ")";
+int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable="
+                  << enable << ")";
 
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    bool setFailed(false);
-    pa_operation* paOperation = NULL;
+  bool setFailed(false);
+  pa_operation* paOperation = NULL;
 
-    uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
 
+  AutoPulseLock auto_lock(_paMainloop);
+
+  // Get the actual stream device index if we have a connected stream
+  // The device used by the stream can be changed
+  // during the call
+  if (_paRecStream &&
+      (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+    deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+  }
+
+  // Set mute switch for the source
+  paOperation = LATE(pa_context_set_source_mute_by_index)(
+      _paContext, deviceIndex, enable, PaSetVolumeCallback, NULL);
+
+  if (!paOperation) {
+    setFailed = true;
+  }
+
+  // Don't need to wait for this to complete.
+  LATE(pa_operation_unref)(paOperation);
+
+  if (setFailed) {
+    LOG(LS_WARNING) << "could not mute microphone, error="
+                    << LATE(pa_context_errno)(_paContext);
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+  {
     AutoPulseLock auto_lock(_paMainloop);
-
     // Get the actual stream device index if we have a connected stream
     // The device used by the stream can be changed
     // during the call
-    if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+    if (_paRecStream &&
+        (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+      deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
     }
+  }
 
-    // Set mute switch for the source
-    paOperation = LATE(pa_context_set_source_mute_by_index)(
-        _paContext, deviceIndex,
-        enable,
-        PaSetVolumeCallback, NULL);
+  if (!GetSourceInfoByIndex(deviceIndex))
+    return -1;
 
-    if (!paOperation)
-    {
-        setFailed = true;
-    }
+  enabled = static_cast<bool>(_paMute);
 
-    // Don't need to wait for this to complete.
-    LATE(pa_operation_unref)(paOperation);
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::MicrophoneMute() => enabled="
+                  << enabled;
 
-    if (setFailed)
-    {
-        LOG(LS_WARNING) << "could not mute microphone, error="
-                        << LATE(pa_context_errno)(_paContext);
-        return -1;
-    }
-
-    return 0;
-}
-
-int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
-
-    uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
-
-    {
-        AutoPulseLock auto_lock(_paMainloop);
-        // Get the actual stream device index if we have a connected stream
-        // The device used by the stream can be changed
-        // during the call
-        if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream)
-            != PA_STREAM_UNCONNECTED))
-        {
-            deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
-        }
-    }
-
-    if (!GetSourceInfoByIndex(deviceIndex))
-      return -1;
-
-    enabled = static_cast<bool> (_paMute);
-
-    LOG(LS_VERBOSE)
-        << "AudioMixerManagerLinuxPulse::MicrophoneMute() => enabled="
-        << enabled;
-
-    return 0;
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeIsAvailable(
-    bool& available)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
+    bool& available) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    // Always available in Pulse Audio
-    available = true;
+  // Always available in Pulse Audio
+  available = true;
 
-    return 0;
+  return 0;
 }
 
-int32_t
-AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume)
-{
-    LOG(LS_VERBOSE)
-        << "AudioMixerManagerLinuxPulse::SetMicrophoneVolume(volume=" << volume
-        << ")";
+int32_t AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetMicrophoneVolume(volume="
+                  << volume << ")";
 
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    // Unlike output streams, input streams have no concept of a stream
-    // volume, only a device volume. So we have to change the volume of the
-    // device itself.
+  // Unlike output streams, input streams have no concept of a stream
+  // volume, only a device volume. So we have to change the volume of the
+  // device itself.
 
-    // The device may have a different number of channels than the stream and
-    // their mapping may be different, so we don't want to use the channel
-    // count from our sample spec. We could use PA_CHANNELS_MAX to cover our
-    // bases, and the server allows that even if the device's channel count
-    // is lower, but some buggy PA clients don't like that (the pavucontrol
-    // on Hardy dies in an assert if the channel count is different). So
-    // instead we look up the actual number of channels that the device has.
+  // The device may have a different number of channels than the stream and
+  // their mapping may be different, so we don't want to use the channel
+  // count from our sample spec. We could use PA_CHANNELS_MAX to cover our
+  // bases, and the server allows that even if the device's channel count
+  // is lower, but some buggy PA clients don't like that (the pavucontrol
+  // on Hardy dies in an assert if the channel count is different). So
+  // instead we look up the actual number of channels that the device has.
+  AutoPulseLock auto_lock(_paMainloop);
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+  // Get the actual stream device index if we have a connected stream
+  // The device used by the stream can be changed
+  // during the call
+  if (_paRecStream &&
+      (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+    deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+  }
+
+  bool setFailed(false);
+  pa_operation* paOperation = NULL;
+
+  // Get the number of channels for this source
+  paOperation = LATE(pa_context_get_source_info_by_index)(
+      _paContext, deviceIndex, PaSourceInfoCallback, (void*)this);
+
+  WaitForOperationCompletion(paOperation);
+
+  uint8_t channels = _paChannels;
+  pa_cvolume cVolumes;
+  LATE(pa_cvolume_set)(&cVolumes, channels, volume);
+
+  // Set the volume for the source
+  paOperation = LATE(pa_context_set_source_volume_by_index)(
+      _paContext, deviceIndex, &cVolumes, PaSetVolumeCallback, NULL);
+
+  if (!paOperation) {
+    setFailed = true;
+  }
+
+  // Don't need to wait for this to complete.
+  LATE(pa_operation_unref)(paOperation);
+
+  if (setFailed) {
+    LOG(LS_WARNING) << "could not set microphone volume, error="
+                    << LATE(pa_context_errno)(_paContext);
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneVolume(uint32_t& volume) const {
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+  {
     AutoPulseLock auto_lock(_paMainloop);
-    uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
-
-    // Get the actual stream device index if we have a connected stream
-    // The device used by the stream can be changed
-    // during the call
-    if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+    // Get the actual stream device index if we have a connected stream.
+    // The device used by the stream can be changed during the call.
+    if (_paRecStream &&
+        (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+      deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
     }
+  }
 
-    bool setFailed(false);
-    pa_operation* paOperation = NULL;
+  if (!GetSourceInfoByIndex(deviceIndex))
+    return -1;
 
-    // Get the number of channels for this source
-    paOperation
-        = LATE(pa_context_get_source_info_by_index)(_paContext, deviceIndex,
-                                                    PaSourceInfoCallback,
-                                                    (void*) this);
+  {
+    AutoPulseLock auto_lock(_paMainloop);
+    volume = static_cast<uint32_t>(_paVolume);
+  }
 
-    WaitForOperationCompletion(paOperation);
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::MicrophoneVolume() => vol="
+                  << volume;
 
-    uint8_t channels = _paChannels;
-    pa_cvolume cVolumes;
-    LATE(pa_cvolume_set)(&cVolumes, channels, volume);
-
-    // Set the volume for the source
-    paOperation
-        = LATE(pa_context_set_source_volume_by_index)(_paContext, deviceIndex,
-                                                      &cVolumes,
-                                                      PaSetVolumeCallback,
-                                                      NULL);
-
-    if (!paOperation)
-    {
-        setFailed = true;
-    }
-
-    // Don't need to wait for this to complete.
-    LATE(pa_operation_unref)(paOperation);
-
-    if (setFailed)
-    {
-        LOG(LS_WARNING) << "could not set microphone volume, error="
-                        << LATE(pa_context_errno)(_paContext);
-        return -1;
-    }
-
-    return 0;
+  return 0;
 }
 
-int32_t
-AudioMixerManagerLinuxPulse::MicrophoneVolume(uint32_t& volume) const
-{
+int32_t AudioMixerManagerLinuxPulse::MaxMicrophoneVolume(
+    uint32_t& maxVolume) const {
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
+  // PA_VOLUME_NORM corresponds to 100% (0db)
+  // PA allows up to 150 db amplification (PA_VOLUME_MAX)
+  // but that doesn't work well for all sound cards
+  maxVolume = static_cast<uint32_t>(PA_VOLUME_NORM);
 
-    uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
-
-    {
-      AutoPulseLock auto_lock(_paMainloop);
-      // Get the actual stream device index if we have a connected stream.
-      // The device used by the stream can be changed during the call.
-      if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream)
-          != PA_STREAM_UNCONNECTED))
-      {
-          deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
-      }
-    }
-
-    if (!GetSourceInfoByIndex(deviceIndex))
-        return -1;
-
-    {
-        AutoPulseLock auto_lock(_paMainloop);
-        volume = static_cast<uint32_t> (_paVolume);
-    }
-
-    LOG(LS_VERBOSE)
-        << "AudioMixerManagerLinuxPulse::MicrophoneVolume() => vol="
-        << volume;
-
-    return 0;
+  return 0;
 }
 
-int32_t
-AudioMixerManagerLinuxPulse::MaxMicrophoneVolume(uint32_t& maxVolume) const
-{
+int32_t AudioMixerManagerLinuxPulse::MinMicrophoneVolume(
+    uint32_t& minVolume) const {
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
+  minVolume = static_cast<uint32_t>(PA_VOLUME_MUTED);
 
-    // PA_VOLUME_NORM corresponds to 100% (0db)
-    // PA allows up to 150 db amplification (PA_VOLUME_MAX)
-    // but that doesn't work well for all sound cards
-    maxVolume = static_cast<uint32_t> (PA_VOLUME_NORM);
-
-    return 0;
-}
-
-int32_t
-AudioMixerManagerLinuxPulse::MinMicrophoneVolume(uint32_t& minVolume) const
-{
-
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
-
-    minVolume = static_cast<uint32_t> (PA_VOLUME_MUTED);
-
-    return 0;
+  return 0;
 }
 
 // ===========================================================================
 //                                 Private Methods
 // ===========================================================================
 
-void
-AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context */*c*/,
-                                                const pa_sink_info *i,
-                                                int eol,
-                                                void *pThis)
-{
-    static_cast<AudioMixerManagerLinuxPulse*> (pThis)->
-        PaSinkInfoCallbackHandler(i, eol);
+void AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context* /*c*/,
+                                                     const pa_sink_info* i,
+                                                     int eol,
+                                                     void* pThis) {
+  static_cast<AudioMixerManagerLinuxPulse*>(pThis)->PaSinkInfoCallbackHandler(
+      i, eol);
 }
 
-void
-AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback(
-    pa_context */*c*/,
-    const pa_sink_input_info *i,
+void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback(
+    pa_context* /*c*/,
+    const pa_sink_input_info* i,
     int eol,
-    void *pThis)
-{
-    static_cast<AudioMixerManagerLinuxPulse*> (pThis)->
-        PaSinkInputInfoCallbackHandler(i, eol);
+    void* pThis) {
+  static_cast<AudioMixerManagerLinuxPulse*>(pThis)
+      ->PaSinkInputInfoCallbackHandler(i, eol);
 }
 
-
-void
-AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context */*c*/,
-                                                  const pa_source_info *i,
-                                                  int eol,
-                                                  void *pThis)
-{
-    static_cast<AudioMixerManagerLinuxPulse*> (pThis)->
-        PaSourceInfoCallbackHandler(i, eol);
+void AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context* /*c*/,
+                                                       const pa_source_info* i,
+                                                       int eol,
+                                                       void* pThis) {
+  static_cast<AudioMixerManagerLinuxPulse*>(pThis)->PaSourceInfoCallbackHandler(
+      i, eol);
 }
 
-void
-AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context * c,
-                                                 int success,
-                                                 void */*pThis*/)
-{
-    if (!success)
-    {
-        LOG(LS_ERROR) << "failed to set volume";
-    }
+void AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context* c,
+                                                      int success,
+                                                      void* /*pThis*/) {
+  if (!success) {
+    LOG(LS_ERROR) << "failed to set volume";
+  }
 }
 
 void AudioMixerManagerLinuxPulse::PaSinkInfoCallbackHandler(
-    const pa_sink_info *i,
-    int eol)
-{
-    if (eol)
-    {
-        // Signal that we are done
-        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
-        return;
-    }
+    const pa_sink_info* i,
+    int eol) {
+  if (eol) {
+    // Signal that we are done
+    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+    return;
+  }
 
-    _paChannels = i->channel_map.channels; // Get number of channels
-    pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
-    for (int j = 0; j < _paChannels; ++j)
-    {
-        if (paVolume < i->volume.values[j])
-        {
-            paVolume = i->volume.values[j];
-        }
+  _paChannels = i->channel_map.channels;   // Get number of channels
+  pa_volume_t paVolume = PA_VOLUME_MUTED;  // Minimum possible value.
+  for (int j = 0; j < _paChannels; ++j) {
+    if (paVolume < i->volume.values[j]) {
+      paVolume = i->volume.values[j];
     }
-    _paVolume = paVolume; // get the max volume for any channel
-    _paMute = i->mute; // get mute status
+  }
+  _paVolume = paVolume;  // get the max volume for any channel
+  _paMute = i->mute;     // get mute status
 
-    // supported since PA 0.9.15
-    //_paVolSteps = i->n_volume_steps; // get the number of volume steps
-    // default value is PA_VOLUME_NORM+1
-    _paVolSteps = PA_VOLUME_NORM + 1;
+  // supported since PA 0.9.15
+  //_paVolSteps = i->n_volume_steps; // get the number of volume steps
+  // default value is PA_VOLUME_NORM+1
+  _paVolSteps = PA_VOLUME_NORM + 1;
 }
 
 void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallbackHandler(
-    const pa_sink_input_info *i,
-    int eol)
-{
-    if (eol)
-    {
-        // Signal that we are done
-        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
-        return;
-    }
+    const pa_sink_input_info* i,
+    int eol) {
+  if (eol) {
+    // Signal that we are done
+    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+    return;
+  }
 
-    _paChannels = i->channel_map.channels; // Get number of channels
-    pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
-    for (int j = 0; j < _paChannels; ++j)
-    {
-        if (paVolume < i->volume.values[j])
-        {
-            paVolume = i->volume.values[j];
-        }
+  _paChannels = i->channel_map.channels;   // Get number of channels
+  pa_volume_t paVolume = PA_VOLUME_MUTED;  // Minimum possible value.
+  for (int j = 0; j < _paChannels; ++j) {
+    if (paVolume < i->volume.values[j]) {
+      paVolume = i->volume.values[j];
     }
-    _paVolume = paVolume; // Get the max volume for any channel
-    _paMute = i->mute; // Get mute status
+  }
+  _paVolume = paVolume;  // Get the max volume for any channel
+  _paMute = i->mute;     // Get mute status
 }
 
 void AudioMixerManagerLinuxPulse::PaSourceInfoCallbackHandler(
-    const pa_source_info *i,
-    int eol)
-{
-    if (eol)
-    {
-        // Signal that we are done
-        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
-        return;
-    }
+    const pa_source_info* i,
+    int eol) {
+  if (eol) {
+    // Signal that we are done
+    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+    return;
+  }
 
-    _paChannels = i->channel_map.channels; // Get number of channels
-    pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
-    for (int j = 0; j < _paChannels; ++j)
-    {
-        if (paVolume < i->volume.values[j])
-        {
-            paVolume = i->volume.values[j];
-        }
+  _paChannels = i->channel_map.channels;   // Get number of channels
+  pa_volume_t paVolume = PA_VOLUME_MUTED;  // Minimum possible value.
+  for (int j = 0; j < _paChannels; ++j) {
+    if (paVolume < i->volume.values[j]) {
+      paVolume = i->volume.values[j];
     }
-    _paVolume = paVolume; // Get the max volume for any channel
-    _paMute = i->mute; // Get mute status
+  }
+  _paVolume = paVolume;  // Get the max volume for any channel
+  _paMute = i->mute;     // Get mute status
 
-    // supported since PA 0.9.15
-    //_paVolSteps = i->n_volume_steps; // Get the number of volume steps
-    // default value is PA_VOLUME_NORM+1
-    _paVolSteps = PA_VOLUME_NORM + 1;
+  // supported since PA 0.9.15
+  //_paVolSteps = i->n_volume_steps; // Get the number of volume steps
+  // default value is PA_VOLUME_NORM+1
+  _paVolSteps = PA_VOLUME_NORM + 1;
 }
 
 void AudioMixerManagerLinuxPulse::WaitForOperationCompletion(
-    pa_operation* paOperation) const
-{
-    while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING)
-    {
-        LATE(pa_threaded_mainloop_wait)(_paMainloop);
-    }
+    pa_operation* paOperation) const {
+  while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) {
+    LATE(pa_threaded_mainloop_wait)(_paMainloop);
+  }
 
-    LATE(pa_operation_unref)(paOperation);
+  LATE(pa_operation_unref)(paOperation);
 }
 
 bool AudioMixerManagerLinuxPulse::GetSinkInputInfo() const {
@@ -941,37 +804,33 @@
   AutoPulseLock auto_lock(_paMainloop);
   // Get info for this stream (sink input).
   paOperation = LATE(pa_context_get_sink_input_info)(
-      _paContext,
-      LATE(pa_stream_get_index)(_paPlayStream),
-      PaSinkInputInfoCallback,
-      (void*) this);
+      _paContext, LATE(pa_stream_get_index)(_paPlayStream),
+      PaSinkInputInfoCallback, (void*)this);
 
   WaitForOperationCompletion(paOperation);
   return true;
 }
 
-bool AudioMixerManagerLinuxPulse::GetSinkInfoByIndex(
-    int device_index) const {
+bool AudioMixerManagerLinuxPulse::GetSinkInfoByIndex(int device_index) const {
   pa_operation* paOperation = NULL;
 
   AutoPulseLock auto_lock(_paMainloop);
-  paOperation = LATE(pa_context_get_sink_info_by_index)(_paContext,
-      device_index, PaSinkInfoCallback, (void*) this);
+  paOperation = LATE(pa_context_get_sink_info_by_index)(
+      _paContext, device_index, PaSinkInfoCallback, (void*)this);
 
   WaitForOperationCompletion(paOperation);
   return true;
 }
 
-bool AudioMixerManagerLinuxPulse::GetSourceInfoByIndex(
-    int device_index) const {
+bool AudioMixerManagerLinuxPulse::GetSourceInfoByIndex(int device_index) const {
   pa_operation* paOperation = NULL;
 
   AutoPulseLock auto_lock(_paMainloop);
-  paOperation  = LATE(pa_context_get_source_info_by_index)(
-      _paContext, device_index, PaSourceInfoCallback, (void*) this);
+  paOperation = LATE(pa_context_get_source_info_by_index)(
+      _paContext, device_index, PaSourceInfoCallback, (void*)this);
 
   WaitForOperationCompletion(paOperation);
   return true;
 }
 
-}
+}  // namespace webrtc
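As an aside (not part of this CL): the comments in MaxMicrophoneVolume()/MinMicrophoneVolume() above cap the reported range at [PA_VOLUME_MUTED, PA_VOLUME_NORM] because amplification above 100% misbehaves on some sound cards. A minimal sketch of mapping a 0-100 level into that range, assuming only the constants from <pulse/volume.h> (the helper name is hypothetical):

#include <pulse/volume.h>

#include <algorithm>
#include <cstdint>

// Hypothetical helper: convert a 0-100 UI level to a pa_volume_t,
// clamped to PA_VOLUME_NORM (100%, 0 dB), matching the range that
// MaxMicrophoneVolume()/MinMicrophoneVolume() report above.
pa_volume_t UiLevelToPaVolume(int level_percent) {
  level_percent = std::min(100, std::max(0, level_percent));
  const uint64_t span = PA_VOLUME_NORM - PA_VOLUME_MUTED;
  return static_cast<pa_volume_t>(PA_VOLUME_MUTED +
                                  span * level_percent / 100);
}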
diff --git a/modules/audio_device/linux/latebindingsymboltable_linux.cc b/modules/audio_device/linux/latebindingsymboltable_linux.cc
index 35f53fa..7a66c34 100644
--- a/modules/audio_device/linux/latebindingsymboltable_linux.cc
+++ b/modules/audio_device/linux/latebindingsymboltable_linux.cc
@@ -19,9 +19,9 @@
 namespace webrtc {
 namespace adm_linux {
 
-inline static const char *GetDllError() {
+inline static const char* GetDllError() {
 #ifdef WEBRTC_LINUX
-  char *err = dlerror();
+  char* err = dlerror();
   if (err) {
     return err;
   } else {
@@ -64,11 +64,11 @@
 }
 
 static bool LoadSymbol(DllHandle handle,
-                       const char *symbol_name,
-                       void **symbol) {
+                       const char* symbol_name,
+                       void** symbol) {
 #ifdef WEBRTC_LINUX
   *symbol = dlsym(handle, symbol_name);
-  char *err = dlerror();
+  char* err = dlerror();
   if (err) {
     LOG(LS_ERROR) << "Error loading symbol " << symbol_name << " : " << err;
     return false;
@@ -87,8 +87,8 @@
 // caller may later interpret as a valid address.
 bool InternalLoadSymbols(DllHandle handle,
                          int num_symbols,
-                         const char *const symbol_names[],
-                         void *symbols[]) {
+                         const char* const symbol_names[],
+                         void* symbols[]) {
 #ifdef WEBRTC_LINUX
   // Clear any old errors.
   dlerror();
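As an aside (not part of this CL): LoadSymbol() in latebindingsymboltable_linux.cc checks dlerror() rather than the returned pointer, since a symbol may legitimately resolve to NULL. A standalone sketch of that pattern, assuming only POSIX <dlfcn.h> (the function name is illustrative):

#include <dlfcn.h>

#include <cstdio>

// Illustrative only: resolve one symbol and distinguish "not found"
// from "resolved to NULL" by clearing and re-reading dlerror().
bool ResolveSymbol(void* handle, const char* name, void** out) {
  dlerror();  // Clear any stale error state.
  *out = dlsym(handle, name);
  const char* err = dlerror();
  if (err) {
    std::fprintf(stderr, "dlsym(%s) failed: %s\n", name, err);
    return false;
  }
  return true;  // *out may be NULL and still be a valid resolution.
}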
diff --git a/modules/audio_device/mac/audio_device_mac.cc b/modules/audio_device/mac/audio_device_mac.cc
index 27d1cc4..635bd0d 100644
--- a/modules/audio_device/mac/audio_device_mac.cc
+++ b/modules/audio_device/mac/audio_device_mac.cc
@@ -23,34 +23,31 @@
 
 namespace webrtc {
 
-#define WEBRTC_CA_RETURN_ON_ERR(expr)                                  \
+#define WEBRTC_CA_RETURN_ON_ERR(expr)                                \
+  do {                                                               \
+    err = expr;                                                      \
+    if (err != noErr) {                                              \
+      logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+      return -1;                                                     \
+    }                                                                \
+  } while (0)
+
+#define WEBRTC_CA_LOG_ERR(expr)                                      \
+  do {                                                               \
+    err = expr;                                                      \
+    if (err != noErr) {                                              \
+      logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+    }                                                                \
+  } while (0)
+
+#define WEBRTC_CA_LOG_WARN(expr)                                       \
   do {                                                                 \
     err = expr;                                                        \
     if (err != noErr) {                                                \
-      logCAMsg(rtc::LS_ERROR, "Error in " #expr,                       \
-               (const char*) & err);                                   \
-      return -1;                                                       \
+      logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
     }                                                                  \
   } while (0)
 
-#define WEBRTC_CA_LOG_ERR(expr)                                        \
-  do {                                                                 \
-    err = expr;                                                        \
-    if (err != noErr) {                                                \
-      logCAMsg(rtc::LS_ERROR, "Error in " #expr,                       \
-               (const char*) & err);                                   \
-    }                                                                  \
-  } while (0)
-
-#define WEBRTC_CA_LOG_WARN(expr)                                         \
-  do {                                                                   \
-    err = expr;                                                          \
-    if (err != noErr) {                                                  \
-      logCAMsg(rtc::LS_WARNING, "Error in " #expr,                       \
-               (const char*) & err);                                     \
-    }                                                                    \
-  } while (0)
-
 enum { MaxNumberDevices = 64 };
 
 void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) {
@@ -94,7 +91,7 @@
   }
 #else
   // We need to flip the characters in this case.
-   switch (sev) {
+  switch (sev) {
     case rtc::LS_ERROR:
       LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
       break;
@@ -373,8 +370,8 @@
 
   err = AudioHardwareUnload();
   if (err != noErr) {
-    logCAMsg(rtc::LS_ERROR,
-             "Error in AudioHardwareUnload()", (const char*)&err);
+    logCAMsg(rtc::LS_ERROR, "Error in AudioHardwareUnload()",
+             (const char*)&err);
     retVal = -1;
   }
 
@@ -1038,8 +1035,7 @@
       _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat));
 
   if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) {
-    logCAMsg(rtc::LS_ERROR,
-             "Unacceptable output stream format -> mFormatID",
+    logCAMsg(rtc::LS_ERROR, "Unacceptable output stream format -> mFormatID",
              (const char*)&_outStreamFormat.mFormatID);
     return -1;
   }
@@ -1146,8 +1142,7 @@
       _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat));
 
   if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) {
-    logCAMsg(rtc::LS_ERROR,
-             "Unacceptable input stream format -> mFormatID",
+    logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
              (const char*)&_inStreamFormat.mFormatID);
     return -1;
   }
@@ -1348,12 +1343,11 @@
       _critSect.Leave();  // Cannot be under lock, risk of deadlock
       if (kEventTimeout == _stopEventRec.Wait(2000)) {
         rtc::CritScope critScoped(&_critSect);
-        LOG(LS_WARNING)
-            << "Timed out stopping the capture IOProc."
-            << "We may have failed to detect a device removal.";
+        LOG(LS_WARNING) << "Timed out stopping the capture IOProc."
+                        << "We may have failed to detect a device removal.";
         WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
         WEBRTC_CA_LOG_WARN(
-          AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
+            AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
       }
       _critSect.Enter();
       _doStopRec = false;
@@ -1377,9 +1371,8 @@
       _critSect.Leave();  // Cannot be under lock, risk of deadlock
       if (kEventTimeout == _stopEvent.Wait(2000)) {
         rtc::CritScope critScoped(&_critSect);
-        LOG(LS_WARNING)
-            << "Timed out stopping the shared IOProc."
-            << "We may have failed to detect a device removal.";
+        LOG(LS_WARNING) << "Timed out stopping the shared IOProc."
+                        << "We may have failed to detect a device removal.";
         // We assume rendering on a shared device has stopped as well if
         // the IOProc times out.
         WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
@@ -1391,7 +1384,7 @@
       LOG(LS_INFO) << "Recording stopped (shared device)";
     } else if (_recIsInitialized && !_playing && !_playIsInitialized) {
       WEBRTC_CA_LOG_WARN(
-            AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+          AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
       LOG(LS_INFO) << "Recording uninitialized (shared device)";
     }
   }
@@ -1486,9 +1479,8 @@
     _critSect.Leave();  // Cannot be under lock, risk of deadlock
     if (kEventTimeout == _stopEvent.Wait(2000)) {
       rtc::CritScope critScoped(&_critSect);
-      LOG(LS_WARNING)
-          << "Timed out stopping the render IOProc."
-          << "We may have failed to detect a device removal.";
+      LOG(LS_WARNING) << "Timed out stopping the render IOProc."
+                      << "We may have failed to detect a device removal.";
 
       // We assume capturing on a shared device has stopped as well if the
       // IOProc times out.
@@ -1501,11 +1493,11 @@
     LOG(LS_INFO) << "Playout stopped";
   } else if (_twoDevices && _playIsInitialized) {
     WEBRTC_CA_LOG_WARN(
-          AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+        AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
     LOG(LS_INFO) << "Playout uninitialized (output device)";
   } else if (!_twoDevices && _playIsInitialized && !_recIsInitialized) {
     WEBRTC_CA_LOG_WARN(
-          AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+        AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
     LOG(LS_INFO) << "Playout uninitialized (shared device)";
   }
 
@@ -1829,8 +1821,8 @@
     _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
   }
 
-  _renderDelayOffsetSamples = _renderBufSizeSamples -
-                              N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
+  _renderDelayOffsetSamples =
+      _renderBufSizeSamples - N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
                                   _outDesiredFormat.mChannelsPerFrame;
 
   _outDesiredFormat.mBytesPerPacket =
@@ -1909,9 +1901,9 @@
       static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
 
   LOG(LS_VERBOSE) << "initial playout status: _renderDelayOffsetSamples="
-                  << _renderDelayOffsetSamples << ", _renderDelayUs="
-                  << _renderDelayUs << ", _renderLatencyUs="
-                  << _renderLatencyUs;
+                  << _renderDelayOffsetSamples
+                  << ", _renderDelayUs=" << _renderDelayUs
+                  << ", _renderLatencyUs=" << _renderLatencyUs;
   return 0;
 }
 
@@ -1970,8 +1962,8 @@
       AtomicSet32(&_captureDeviceIsAlive, 0);
       _mixerManager.CloseMicrophone();
     } else if (err != noErr) {
-      logCAMsg(rtc::LS_ERROR,
-               "Error in AudioDeviceGetProperty()", (const char*)&err);
+      logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()",
+               (const char*)&err);
       return -1;
     }
   }
@@ -1989,8 +1981,8 @@
       AtomicSet32(&_renderDeviceIsAlive, 0);
       _mixerManager.CloseSpeaker();
     } else if (err != noErr) {
-      logCAMsg(rtc::LS_ERROR,
-               "Error in AudioDeviceGetProperty()", (const char*)&err);
+      logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()",
+               (const char*)&err);
       return -1;
     }
   }
@@ -2016,8 +2008,7 @@
       objectId, &propertyAddress, 0, NULL, &size, &streamFormat));
 
   if (streamFormat.mFormatID != kAudioFormatLinearPCM) {
-    logCAMsg(rtc::LS_ERROR,
-             "Unacceptable input stream format -> mFormatID",
+    logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
              (const char*)&streamFormat.mFormatID);
     return -1;
   }
@@ -2042,8 +2033,7 @@
   LOG(LS_VERBOSE) << "mBytesPerFrame = " << streamFormat.mBytesPerFrame
                   << ", mBitsPerChannel = " << streamFormat.mBitsPerChannel;
   LOG(LS_VERBOSE) << "mFormatFlags = " << streamFormat.mFormatFlags;
-  logCAMsg(rtc::LS_VERBOSE, "mFormatID",
-           (const char*)&streamFormat.mFormatID);
+  logCAMsg(rtc::LS_VERBOSE, "mFormatID", (const char*)&streamFormat.mFormatID);
 
   if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
     const int io_block_size_samples = streamFormat.mChannelsPerFrame *
@@ -2247,8 +2237,8 @@
       LOG(LS_ERROR) << "Error in AudioConverterFillComplexBuffer()";
       return 1;
     } else {
-      logCAMsg(rtc::LS_ERROR,
-               "Error in AudioConverterFillComplexBuffer()", (const char*)&err);
+      logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
+               (const char*)&err);
       return 1;
     }
   }
@@ -2485,8 +2475,8 @@
       // This is our own error.
       return false;
     } else {
-      logCAMsg(rtc::LS_ERROR,
-               "Error in AudioConverterFillComplexBuffer()", (const char*)&err);
+      logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
+               (const char*)&err);
       return false;
     }
   }
diff --git a/modules/audio_device/mac/audio_mixer_manager_mac.cc b/modules/audio_device/mac/audio_mixer_manager_mac.cc
index 14d3f98..928fae7 100644
--- a/modules/audio_device/mac/audio_mixer_manager_mac.cc
+++ b/modules/audio_device/mac/audio_mixer_manager_mac.cc
@@ -14,34 +14,31 @@
 
 namespace webrtc {
 
-#define WEBRTC_CA_RETURN_ON_ERR(expr)                                  \
+#define WEBRTC_CA_RETURN_ON_ERR(expr)                                \
+  do {                                                               \
+    err = expr;                                                      \
+    if (err != noErr) {                                              \
+      logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+      return -1;                                                     \
+    }                                                                \
+  } while (0)
+
+#define WEBRTC_CA_LOG_ERR(expr)                                      \
+  do {                                                               \
+    err = expr;                                                      \
+    if (err != noErr) {                                              \
+      logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+    }                                                                \
+  } while (0)
+
+#define WEBRTC_CA_LOG_WARN(expr)                                       \
   do {                                                                 \
     err = expr;                                                        \
     if (err != noErr) {                                                \
-      logCAMsg(rtc::LS_ERROR, "Error in " #expr,                       \
-               (const char*) & err);                                   \
-      return -1;                                                       \
+      logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
     }                                                                  \
   } while (0)
 
-#define WEBRTC_CA_LOG_ERR(expr)                                        \
-  do {                                                                 \
-    err = expr;                                                        \
-    if (err != noErr) {                                                \
-      logCAMsg(rtc::LS_ERROR, "Error in " #expr,                       \
-               (const char*) & err);                                   \
-    }                                                                  \
-  } while (0)
-
-#define WEBRTC_CA_LOG_WARN(expr)                                         \
-  do {                                                                   \
-    err = expr;                                                          \
-    if (err != noErr) {                                                  \
-      logCAMsg(rtc::LS_WARNING, "Error in " #expr,                       \
-               (const char*) & err);                                     \
-    }                                                                    \
-  } while (0)
-
 AudioMixerManagerMac::AudioMixerManagerMac()
     : _inputDeviceID(kAudioObjectUnknown),
       _outputDeviceID(kAudioObjectUnknown),
@@ -876,8 +873,8 @@
 
 // CoreAudio errors are best interpreted as four character strings.
 void AudioMixerManagerMac::logCAMsg(const rtc::LoggingSeverity sev,
-                              const char* msg,
-                              const char* err) {
+                                    const char* msg,
+                                    const char* err) {
   RTC_DCHECK(msg != NULL);
   RTC_DCHECK(err != NULL);
   RTC_DCHECK(sev == rtc::LS_ERROR || sev == rtc::LS_WARNING);
@@ -895,7 +892,7 @@
   }
 #else
   // We need to flip the characters in this case.
-   switch (sev) {
+  switch (sev) {
     case rtc::LS_ERROR:
       LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
       break;
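As an aside (not part of this CL): logCAMsg() above prints CoreAudio OSStatus values as four-character codes, reversing the byte order on little-endian hosts. A minimal standalone sketch of the same conversion (names are illustrative, not WebRTC API):

#include <cstdint>
#include <cstring>
#include <string>

// Illustrative only: render a 32-bit status code as the four-character
// tag it was defined from. On little-endian machines the in-memory
// bytes are reversed, which is why logCAMsg() flips them before logging.
std::string FourCharCode(int32_t err) {
  char c[4];
  std::memcpy(c, &err, sizeof(c));
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  return std::string{c[3], c[2], c[1], c[0]};
#else
  return std::string{c[0], c[1], c[2], c[3]};
#endif
}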
diff --git a/modules/audio_device/win/audio_device_core_win.cc b/modules/audio_device/win/audio_device_core_win.cc
index 6fcbb6e..32c3f94 100644
--- a/modules/audio_device/win/audio_device_core_win.cc
+++ b/modules/audio_device/win/audio_device_core_win.cc
@@ -8,13 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#pragma warning(disable: 4995)  // name was marked as #pragma deprecated
+#pragma warning(disable : 4995)  // name was marked as #pragma deprecated
 
 #if (_MSC_VER >= 1310) && (_MSC_VER < 1400)
 // Reports the major and minor versions of the compiler.
-// For example, 1310 for Microsoft Visual C++ .NET 2003. 1310 represents version 13 and a 1.0 point release.
-// The Visual C++ 2005 compiler version is 1400.
-// Type cl /? at the command line to see the major and minor versions of your compiler along with the build number.
+// For example, 1310 for Microsoft Visual C++ .NET 2003. 1310 represents version
+// 13 and a 1.0 point release. The Visual C++ 2005 compiler version is 1400.
+// Type cl /? at the command line to see the major and minor versions of your
+// compiler along with the build number.
 #pragma message(">> INFO: Windows Core Audio is not supported in VS 2003")
 #endif
 
@@ -27,13 +28,13 @@
 #include <assert.h>
 #include <string.h>
 
-#include <windows.h>
+#include <Functiondiscoverykeys_devpkey.h>
 #include <comdef.h>
 #include <dmo.h>
-#include <Functiondiscoverykeys_devpkey.h>
 #include <mmsystem.h>
 #include <strsafe.h>
 #include <uuids.h>
+#include <windows.h>
 
 #include <iomanip>
 
@@ -42,25 +43,38 @@
 #include "system_wrappers/include/sleep.h"
 
 // Macro that calls a COM method returning HRESULT value.
-#define EXIT_ON_ERROR(hres)    do { if (FAILED(hres)) goto Exit; } while(0)
+#define EXIT_ON_ERROR(hres) \
+  do {                      \
+    if (FAILED(hres))       \
+      goto Exit;            \
+  } while (0)
 
 // Macro that continues to a COM error.
-#define CONTINUE_ON_ERROR(hres) do { if (FAILED(hres)) goto Next; } while(0)
+#define CONTINUE_ON_ERROR(hres) \
+  do {                          \
+    if (FAILED(hres))           \
+      goto Next;                \
+  } while (0)
 
 // Macro that releases a COM object if not NULL.
-#define SAFE_RELEASE(p)     do { if ((p)) { (p)->Release(); (p) = NULL; } } while(0)
+#define SAFE_RELEASE(p) \
+  do {                  \
+    if ((p)) {          \
+      (p)->Release();   \
+      (p) = NULL;       \
+    }                   \
+  } while (0)
 
-#define ROUND(x) ((x) >=0 ? (int)((x) + 0.5) : (int)((x) - 0.5))
+#define ROUND(x) ((x) >= 0 ? (int)((x) + 0.5) : (int)((x)-0.5))
 
 // REFERENCE_TIME time units per millisecond
-#define REFTIMES_PER_MILLISEC  10000
+#define REFTIMES_PER_MILLISEC 10000
 
-typedef struct tagTHREADNAME_INFO
-{
-   DWORD dwType;        // must be 0x1000
-   LPCSTR szName;       // pointer to name (in user addr space)
-   DWORD dwThreadID;    // thread ID (-1=caller thread)
-   DWORD dwFlags;       // reserved for future use, must be zero
+typedef struct tagTHREADNAME_INFO {
+  DWORD dwType;      // must be 0x1000
+  LPCSTR szName;     // pointer to name (in user addr space)
+  DWORD dwThreadID;  // thread ID (-1=caller thread)
+  DWORD dwFlags;     // reserved for future use, must be zero
 } THREADNAME_INFO;
 
 namespace webrtc {
@@ -68,11 +82,7 @@
 
 enum { COM_THREADING_MODEL = COINIT_MULTITHREADED };
 
-enum
-{
-    kAecCaptureStreamIndex = 0,
-    kAecRenderStreamIndex = 1
-};
+enum { kAecCaptureStreamIndex = 0, kAecRenderStreamIndex = 1 };
 
 // An implementation of IMediaBuffer, as required for
 // IMediaObject::ProcessOutput(). After consuming data provided by
@@ -80,95 +90,75 @@
 //
 // Example implementation:
 // http://msdn.microsoft.com/en-us/library/dd376684(v=vs.85).aspx
-class MediaBufferImpl : public IMediaBuffer
-{
-public:
-    explicit MediaBufferImpl(DWORD maxLength)
-        : _data(new BYTE[maxLength]),
-          _length(0),
-          _maxLength(maxLength),
-          _refCount(0)
-    {}
+class MediaBufferImpl : public IMediaBuffer {
+ public:
+  explicit MediaBufferImpl(DWORD maxLength)
+      : _data(new BYTE[maxLength]),
+        _length(0),
+        _maxLength(maxLength),
+        _refCount(0) {}
 
-    // IMediaBuffer methods.
-    STDMETHOD(GetBufferAndLength(BYTE** ppBuffer, DWORD* pcbLength))
-    {
-        if (!ppBuffer || !pcbLength)
-        {
-            return E_POINTER;
-        }
-
-        *ppBuffer = _data;
-        *pcbLength = _length;
-
-        return S_OK;
+  // IMediaBuffer methods.
+  STDMETHOD(GetBufferAndLength(BYTE** ppBuffer, DWORD* pcbLength)) {
+    if (!ppBuffer || !pcbLength) {
+      return E_POINTER;
     }
 
-    STDMETHOD(GetMaxLength(DWORD* pcbMaxLength))
-    {
-        if (!pcbMaxLength)
-        {
-            return E_POINTER;
-        }
+    *ppBuffer = _data;
+    *pcbLength = _length;
 
-        *pcbMaxLength = _maxLength;
-        return S_OK;
+    return S_OK;
+  }
+
+  STDMETHOD(GetMaxLength(DWORD* pcbMaxLength)) {
+    if (!pcbMaxLength) {
+      return E_POINTER;
     }
 
-    STDMETHOD(SetLength(DWORD cbLength))
-    {
-        if (cbLength > _maxLength)
-        {
-            return E_INVALIDARG;
-        }
+    *pcbMaxLength = _maxLength;
+    return S_OK;
+  }
 
-        _length = cbLength;
-        return S_OK;
+  STDMETHOD(SetLength(DWORD cbLength)) {
+    if (cbLength > _maxLength) {
+      return E_INVALIDARG;
     }
 
-    // IUnknown methods.
-    STDMETHOD_(ULONG, AddRef())
-    {
-        return InterlockedIncrement(&_refCount);
+    _length = cbLength;
+    return S_OK;
+  }
+
+  // IUnknown methods.
+  STDMETHOD_(ULONG, AddRef()) { return InterlockedIncrement(&_refCount); }
+
+  STDMETHOD(QueryInterface(REFIID riid, void** ppv)) {
+    if (!ppv) {
+      return E_POINTER;
+    } else if (riid != IID_IMediaBuffer && riid != IID_IUnknown) {
+      return E_NOINTERFACE;
     }
 
-    STDMETHOD(QueryInterface(REFIID riid, void** ppv))
-    {
-        if (!ppv)
-        {
-            return E_POINTER;
-        }
-        else if (riid != IID_IMediaBuffer && riid != IID_IUnknown)
-        {
-            return E_NOINTERFACE;
-        }
+    *ppv = static_cast<IMediaBuffer*>(this);
+    AddRef();
+    return S_OK;
+  }
 
-        *ppv = static_cast<IMediaBuffer*>(this);
-        AddRef();
-        return S_OK;
+  STDMETHOD_(ULONG, Release()) {
+    LONG refCount = InterlockedDecrement(&_refCount);
+    if (refCount == 0) {
+      delete this;
     }
 
-    STDMETHOD_(ULONG, Release())
-    {
-        LONG refCount = InterlockedDecrement(&_refCount);
-        if (refCount == 0)
-        {
-            delete this;
-        }
+    return refCount;
+  }
 
-        return refCount;
-    }
+ private:
+  ~MediaBufferImpl() { delete[] _data; }
 
-private:
-    ~MediaBufferImpl()
-    {
-        delete [] _data;
-    }
-
-    BYTE* _data;
-    DWORD _length;
-    const DWORD _maxLength;
-    LONG _refCount;
+  BYTE* _data;
+  DWORD _length;
+  const DWORD _maxLength;
+  LONG _refCount;
 };
 }  // namespace
 
@@ -180,228 +170,204 @@
 //  CoreAudioIsSupported
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::CoreAudioIsSupported()
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+bool AudioDeviceWindowsCore::CoreAudioIsSupported() {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    bool MMDeviceIsAvailable(false);
-    bool coreAudioIsSupported(false);
+  bool MMDeviceIsAvailable(false);
+  bool coreAudioIsSupported(false);
 
-    HRESULT hr(S_OK);
-    TCHAR buf[MAXERRORLENGTH];
-    TCHAR errorText[MAXERRORLENGTH];
+  HRESULT hr(S_OK);
+  TCHAR buf[MAXERRORLENGTH];
+  TCHAR errorText[MAXERRORLENGTH];
 
-    // 1) Check if Windows version is Vista SP1 or later.
-    //
-    // CoreAudio is only available on Vista SP1 and later.
-    //
-    OSVERSIONINFOEX osvi;
-    DWORDLONG dwlConditionMask = 0;
-    int op = VER_LESS_EQUAL;
+  // 1) Check if Windows version is Vista SP1 or later.
+  //
+  // CoreAudio is only available on Vista SP1 and later.
+  //
+  OSVERSIONINFOEX osvi;
+  DWORDLONG dwlConditionMask = 0;
+  int op = VER_LESS_EQUAL;
 
-    // Initialize the OSVERSIONINFOEX structure.
-    ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
-    osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
-    osvi.dwMajorVersion = 6;
-    osvi.dwMinorVersion = 0;
-    osvi.wServicePackMajor = 0;
-    osvi.wServicePackMinor = 0;
-    osvi.wProductType = VER_NT_WORKSTATION;
+  // Initialize the OSVERSIONINFOEX structure.
+  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
+  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
+  osvi.dwMajorVersion = 6;
+  osvi.dwMinorVersion = 0;
+  osvi.wServicePackMajor = 0;
+  osvi.wServicePackMinor = 0;
+  osvi.wProductType = VER_NT_WORKSTATION;
 
-    // Initialize the condition mask.
-    VER_SET_CONDITION(dwlConditionMask, VER_MAJORVERSION, op);
-    VER_SET_CONDITION(dwlConditionMask, VER_MINORVERSION, op);
-    VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMAJOR, op);
-    VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMINOR, op);
-    VER_SET_CONDITION(dwlConditionMask, VER_PRODUCT_TYPE, VER_EQUAL);
+  // Initialize the condition mask.
+  VER_SET_CONDITION(dwlConditionMask, VER_MAJORVERSION, op);
+  VER_SET_CONDITION(dwlConditionMask, VER_MINORVERSION, op);
+  VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMAJOR, op);
+  VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMINOR, op);
+  VER_SET_CONDITION(dwlConditionMask, VER_PRODUCT_TYPE, VER_EQUAL);
 
-    DWORD dwTypeMask = VER_MAJORVERSION | VER_MINORVERSION |
-                       VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR |
-                       VER_PRODUCT_TYPE;
+  DWORD dwTypeMask = VER_MAJORVERSION | VER_MINORVERSION |
+                     VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR |
+                     VER_PRODUCT_TYPE;
 
-    // Perform the test.
-    BOOL isVistaRTMorXP = VerifyVersionInfo(&osvi, dwTypeMask,
-                                            dwlConditionMask);
-    if (isVistaRTMorXP != 0)
-    {
-        LOG(LS_VERBOSE)
-            << "*** Windows Core Audio is only supported on Vista SP1 or later"
-            << " => will revert to the Wave API ***";
-        return false;
+  // Perform the test.
+  BOOL isVistaRTMorXP = VerifyVersionInfo(&osvi, dwTypeMask, dwlConditionMask);
+  if (isVistaRTMorXP != 0) {
+    LOG(LS_VERBOSE)
+        << "*** Windows Core Audio is only supported on Vista SP1 or later"
+        << " => will revert to the Wave API ***";
+    return false;
+  }
+
+  // 2) Initializes the COM library for use by the calling thread.
+
+  // The COM init wrapper sets the thread's concurrency model to MTA,
+  // and creates a new apartment for the thread if one is required. The
+  // wrapper also ensures that each call to CoInitializeEx is balanced
+  // by a corresponding call to CoUninitialize.
+  //
+  ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+  if (!comInit.succeeded()) {
+    // Things will work even if an STA thread is calling this method but we
+    // want to ensure that MTA is used and therefore return false here.
+    return false;
+  }
+
+  // 3) Check if the MMDevice API is available.
+  //
+  // The Windows Multimedia Device (MMDevice) API enables audio clients to
+  // discover audio endpoint devices, determine their capabilities, and create
+  // driver instances for those devices.
+  // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
+  // The MMDevice API consists of several interfaces. The first of these is the
+  // IMMDeviceEnumerator interface. To access the interfaces in the MMDevice
+  // API, a client obtains a reference to the IMMDeviceEnumerator interface of a
+  // device-enumerator object by calling the CoCreateInstance function.
+  //
+  // Through the IMMDeviceEnumerator interface, the client can obtain references
+  // to the other interfaces in the MMDevice API. The MMDevice API implements
+  // the following interfaces:
+  //
+  // IMMDevice            Represents an audio device.
+  // IMMDeviceCollection  Represents a collection of audio devices.
+  // IMMDeviceEnumerator  Provides methods for enumerating audio devices.
+  // IMMEndpoint          Represents an audio endpoint device.
+  //
+  IMMDeviceEnumerator* pIMMD(NULL);
+  const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
+  const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
+
+  hr = CoCreateInstance(
+      CLSID_MMDeviceEnumerator,  // GUID value of MMDeviceEnumerator coclass
+      NULL, CLSCTX_ALL,
+      IID_IMMDeviceEnumerator,  // GUID value of the IMMDeviceEnumerator
+                                // interface
+      (void**)&pIMMD);
+
+  if (FAILED(hr)) {
+    LOG(LS_ERROR) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+                  << " Failed to create the required COM object (hr=" << hr
+                  << ")";
+    LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+                    << " CoCreateInstance(MMDeviceEnumerator) failed (hr=" << hr
+                    << ")";
+
+    const DWORD dwFlags =
+        FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+    const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
+
+    // Gets the system's human readable message string for this HRESULT.
+    // All error messages are in English by default.
+    DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText,
+                                           MAXERRORLENGTH, NULL);
+
+    assert(messageLength <= MAXERRORLENGTH);
+
+    // Trims trailing white space (FormatMessage() leaves a trailing cr-lf.).
+    for (; messageLength && ::isspace(errorText[messageLength - 1]);
+         --messageLength) {
+      errorText[messageLength - 1] = '\0';
     }
 
-    // 2) Initializes the COM library for use by the calling thread.
+    StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
+    StringCchCat(buf, MAXERRORLENGTH, errorText);
+    LOG(LS_VERBOSE) << buf;
+  } else {
+    MMDeviceIsAvailable = true;
+    LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+                    << " CoCreateInstance(MMDeviceEnumerator) succeeded (hr="
+                    << hr << ")";
+    SAFE_RELEASE(pIMMD);
+  }
 
-    // The COM init wrapper sets the thread's concurrency model to MTA,
-    // and creates a new apartment for the thread if one is required. The
-    // wrapper also ensures that each call to CoInitializeEx is balanced
-    // by a corresponding call to CoUninitialize.
-    //
-    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
-    if (!comInit.succeeded()) {
-      // Things will work even if an STA thread is calling this method but we
-      // want to ensure that MTA is used and therefore return false here.
+  // 4) Verify that we can create and initialize our Core Audio class.
+  //
+  // Also, perform a limited "API test" to ensure that Core Audio is supported
+  // for all devices.
+  //
+  if (MMDeviceIsAvailable) {
+    coreAudioIsSupported = false;
+
+    AudioDeviceWindowsCore* p = new AudioDeviceWindowsCore();
+    if (p == NULL) {
       return false;
     }
 
-    // 3) Check if the MMDevice API is available.
-    //
-    // The Windows Multimedia Device (MMDevice) API enables audio clients to
-    // discover audio endpoint devices, determine their capabilities, and create
-    // driver instances for those devices.
-    // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
-    // The MMDevice API consists of several interfaces. The first of these is the
-    // IMMDeviceEnumerator interface. To access the interfaces in the MMDevice API,
-    // a client obtains a reference to the IMMDeviceEnumerator interface of a
-    // device-enumerator object by calling the CoCreateInstance function.
-    //
-    // Through the IMMDeviceEnumerator interface, the client can obtain references
-    // to the other interfaces in the MMDevice API. The MMDevice API implements
-    // the following interfaces:
-    //
-    // IMMDevice            Represents an audio device.
-    // IMMDeviceCollection  Represents a collection of audio devices.
-    // IMMDeviceEnumerator  Provides methods for enumerating audio devices.
-    // IMMEndpoint          Represents an audio endpoint device.
-    //
-    IMMDeviceEnumerator* pIMMD(NULL);
-    const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
-    const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
+    int ok(0);
+    int temp_ok(0);
+    bool available(false);
 
-    hr = CoCreateInstance(
-            CLSID_MMDeviceEnumerator,   // GUID value of MMDeviceEnumerator coclass
-            NULL,
-            CLSCTX_ALL,
-            IID_IMMDeviceEnumerator,    // GUID value of the IMMDeviceEnumerator interface
-            (void**)&pIMMD );
-
-    if (FAILED(hr))
-    {
-        LOG(LS_ERROR) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
-                      << " Failed to create the required COM object (hr="
-                      << hr << ")";
-        LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
-                        << " CoCreateInstance(MMDeviceEnumerator) failed (hr="
-                        << hr << ")";
-
-        const DWORD dwFlags = FORMAT_MESSAGE_FROM_SYSTEM |
-                              FORMAT_MESSAGE_IGNORE_INSERTS;
-        const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
-
-        // Gets the system's human readable message string for this HRESULT.
-        // All error message in English by default.
-        DWORD messageLength = ::FormatMessageW(dwFlags,
-                                               0,
-                                               hr,
-                                               dwLangID,
-                                               errorText,
-                                               MAXERRORLENGTH,
-                                               NULL);
-
-        assert(messageLength <= MAXERRORLENGTH);
-
-        // Trims tailing white space (FormatMessage() leaves a trailing cr-lf.).
-        for (; messageLength && ::isspace(errorText[messageLength - 1]);
-             --messageLength)
-        {
-            errorText[messageLength - 1] = '\0';
-        }
-
-        StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
-        StringCchCat(buf, MAXERRORLENGTH, errorText);
-        LOG(LS_VERBOSE) << buf;
-    }
-    else
-    {
-        MMDeviceIsAvailable = true;
-        LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
-            << " CoCreateInstance(MMDeviceEnumerator) succeeded (hr=" << hr
-            << ")";
-        SAFE_RELEASE(pIMMD);
+    if (p->Init() != InitStatus::OK) {
+      ok |= -1;
     }
 
-    // 4) Verify that we can create and initialize our Core Audio class.
-    //
-    // Also, perform a limited "API test" to ensure that Core Audio is supported for all devices.
-    //
-    if (MMDeviceIsAvailable)
-    {
-        coreAudioIsSupported = false;
-
-        AudioDeviceWindowsCore* p = new AudioDeviceWindowsCore();
-        if (p == NULL)
-        {
-            return false;
-        }
-
-        int ok(0);
-        int temp_ok(0);
-        bool available(false);
-
-        if (p->Init() != InitStatus::OK) {
-          ok |= -1;
-        }
-
-        int16_t numDevsRec = p->RecordingDevices();
-        for (uint16_t i = 0; i < numDevsRec; i++)
-        {
-            ok |= p->SetRecordingDevice(i);
-            temp_ok = p->RecordingIsAvailable(available);
-            ok |= temp_ok;
-            ok |= (available == false);
-            if (available)
-            {
-                ok |= p->InitMicrophone();
-            }
-            if (ok)
-            {
-                LOG(LS_WARNING)
-                    << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
-                    << " Failed to use Core Audio Recording for device id="
-                    << i;
-            }
-        }
-
-        int16_t numDevsPlay = p->PlayoutDevices();
-        for (uint16_t i = 0; i < numDevsPlay; i++)
-        {
-            ok |= p->SetPlayoutDevice(i);
-            temp_ok = p->PlayoutIsAvailable(available);
-            ok |= temp_ok;
-            ok |= (available == false);
-            if (available)
-            {
-                ok |= p->InitSpeaker();
-            }
-            if (ok)
-            {
-                LOG(LS_WARNING)
-                    << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
-                    << " Failed to use Core Audio Playout for device id=" << i;
-            }
-        }
-
-        ok |= p->Terminate();
-
-        if (ok == 0)
-        {
-            coreAudioIsSupported = true;
-        }
-
-        delete p;
+    int16_t numDevsRec = p->RecordingDevices();
+    for (uint16_t i = 0; i < numDevsRec; i++) {
+      ok |= p->SetRecordingDevice(i);
+      temp_ok = p->RecordingIsAvailable(available);
+      ok |= temp_ok;
+      ok |= (available == false);
+      if (available) {
+        ok |= p->InitMicrophone();
+      }
+      if (ok) {
+        LOG(LS_WARNING) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+                        << " Failed to use Core Audio Recording for device id="
+                        << i;
+      }
     }
 
-    if (coreAudioIsSupported)
-    {
-        LOG(LS_VERBOSE) << "*** Windows Core Audio is supported ***";
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "*** Windows Core Audio is NOT supported"
-                        << " => will revert to the Wave API ***";
+    int16_t numDevsPlay = p->PlayoutDevices();
+    for (uint16_t i = 0; i < numDevsPlay; i++) {
+      ok |= p->SetPlayoutDevice(i);
+      temp_ok = p->PlayoutIsAvailable(available);
+      ok |= temp_ok;
+      ok |= (available == false);
+      if (available) {
+        ok |= p->InitSpeaker();
+      }
+      if (ok) {
+        LOG(LS_WARNING) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+                        << " Failed to use Core Audio Playout for device id="
+                        << i;
+      }
     }
 
-    return (coreAudioIsSupported);
+    ok |= p->Terminate();
+
+    if (ok == 0) {
+      coreAudioIsSupported = true;
+    }
+
+    delete p;
+  }
+
+  if (coreAudioIsSupported) {
+    LOG(LS_VERBOSE) << "*** Windows Core Audio is supported ***";
+  } else {
+    LOG(LS_VERBOSE) << "*** Windows Core Audio is NOT supported"
+                    << " => will revert to the Wave API ***";
+  }
+
+  return (coreAudioIsSupported);
 }
 
 // ============================================================================
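As an aside (not part of this CL): the comment block in CoreAudioIsSupported() above lists the MMDevice interfaces reachable from IMMDeviceEnumerator. A minimal sketch of chaining them to count active render endpoints, assuming only <mmdeviceapi.h> (the helper name is hypothetical):

#include <mmdeviceapi.h>

// Hypothetical helper: with an IMMDeviceEnumerator obtained through
// CoCreateInstance (as done above), the remaining MMDevice interfaces
// are reached from it.
HRESULT CountActiveRenderDevices(IMMDeviceEnumerator* enumerator,
                                 UINT* count) {
  IMMDeviceCollection* collection = NULL;
  HRESULT hr = enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE,
                                              &collection);
  if (FAILED(hr))
    return hr;
  hr = collection->GetCount(count);
  collection->Release();
  return hr;
}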
@@ -566,76 +532,62 @@
 //  AudioDeviceWindowsCore() - dtor
 // ----------------------------------------------------------------------------
 
-AudioDeviceWindowsCore::~AudioDeviceWindowsCore()
-{
-    LOG(LS_INFO) << __FUNCTION__ << " destroyed";
+AudioDeviceWindowsCore::~AudioDeviceWindowsCore() {
+  LOG(LS_INFO) << __FUNCTION__ << " destroyed";
 
-    Terminate();
+  Terminate();
 
-    // The IMMDeviceEnumerator is created during construction. Must release
-    // it here and not in Terminate() since we don't recreate it in Init().
-    SAFE_RELEASE(_ptrEnumerator);
+  // The IMMDeviceEnumerator is created during construction. Must release
+  // it here and not in Terminate() since we don't recreate it in Init().
+  SAFE_RELEASE(_ptrEnumerator);
 
-    _ptrAudioBuffer = NULL;
+  _ptrAudioBuffer = NULL;
 
-    if (NULL != _hRenderSamplesReadyEvent)
-    {
-        CloseHandle(_hRenderSamplesReadyEvent);
-        _hRenderSamplesReadyEvent = NULL;
+  if (NULL != _hRenderSamplesReadyEvent) {
+    CloseHandle(_hRenderSamplesReadyEvent);
+    _hRenderSamplesReadyEvent = NULL;
+  }
+
+  if (NULL != _hCaptureSamplesReadyEvent) {
+    CloseHandle(_hCaptureSamplesReadyEvent);
+    _hCaptureSamplesReadyEvent = NULL;
+  }
+
+  if (NULL != _hRenderStartedEvent) {
+    CloseHandle(_hRenderStartedEvent);
+    _hRenderStartedEvent = NULL;
+  }
+
+  if (NULL != _hCaptureStartedEvent) {
+    CloseHandle(_hCaptureStartedEvent);
+    _hCaptureStartedEvent = NULL;
+  }
+
+  if (NULL != _hShutdownRenderEvent) {
+    CloseHandle(_hShutdownRenderEvent);
+    _hShutdownRenderEvent = NULL;
+  }
+
+  if (NULL != _hShutdownCaptureEvent) {
+    CloseHandle(_hShutdownCaptureEvent);
+    _hShutdownCaptureEvent = NULL;
+  }
+
+  if (NULL != _hSetCaptureVolumeEvent) {
+    CloseHandle(_hSetCaptureVolumeEvent);
+    _hSetCaptureVolumeEvent = NULL;
+  }
+
+  if (_avrtLibrary) {
+    BOOL freeOK = FreeLibrary(_avrtLibrary);
+    if (!freeOK) {
+      LOG(LS_WARNING) << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
+                      << " failed to free the loaded Avrt DLL module correctly";
+    } else {
+      LOG(LS_WARNING) << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
+                      << " the Avrt DLL module is now unloaded";
     }
-
-    if (NULL != _hCaptureSamplesReadyEvent)
-    {
-        CloseHandle(_hCaptureSamplesReadyEvent);
-        _hCaptureSamplesReadyEvent = NULL;
-    }
-
-    if (NULL != _hRenderStartedEvent)
-    {
-        CloseHandle(_hRenderStartedEvent);
-        _hRenderStartedEvent = NULL;
-    }
-
-    if (NULL != _hCaptureStartedEvent)
-    {
-        CloseHandle(_hCaptureStartedEvent);
-        _hCaptureStartedEvent = NULL;
-    }
-
-    if (NULL != _hShutdownRenderEvent)
-    {
-        CloseHandle(_hShutdownRenderEvent);
-        _hShutdownRenderEvent = NULL;
-    }
-
-    if (NULL != _hShutdownCaptureEvent)
-    {
-        CloseHandle(_hShutdownCaptureEvent);
-        _hShutdownCaptureEvent = NULL;
-    }
-
-    if (NULL != _hSetCaptureVolumeEvent)
-    {
-        CloseHandle(_hSetCaptureVolumeEvent);
-        _hSetCaptureVolumeEvent = NULL;
-    }
-
-    if (_avrtLibrary)
-    {
-        BOOL freeOK = FreeLibrary(_avrtLibrary);
-        if (!freeOK)
-        {
-            LOG(LS_WARNING)
-                << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
-                << " failed to free the loaded Avrt DLL module correctly";
-        }
-        else
-        {
-            LOG(LS_WARNING)
-                << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
-                << " the Avrt DLL module is now unloaded";
-        }
-    }
+  }
 }
 
 // ============================================================================
@@ -646,28 +598,26 @@
 //  AttachAudioBuffer
 // ----------------------------------------------------------------------------
 
-void AudioDeviceWindowsCore::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
-{
+void AudioDeviceWindowsCore::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  _ptrAudioBuffer = audioBuffer;
 
-    _ptrAudioBuffer = audioBuffer;
-
-    // Inform the AudioBuffer about default settings for this implementation.
-    // Set all values to zero here since the actual settings will be done by
-    // InitPlayout and InitRecording later.
-    _ptrAudioBuffer->SetRecordingSampleRate(0);
-    _ptrAudioBuffer->SetPlayoutSampleRate(0);
-    _ptrAudioBuffer->SetRecordingChannels(0);
-    _ptrAudioBuffer->SetPlayoutChannels(0);
+  // Inform the AudioBuffer about default settings for this implementation.
+  // Set all values to zero here since the actual settings will be done by
+  // InitPlayout and InitRecording later.
+  _ptrAudioBuffer->SetRecordingSampleRate(0);
+  _ptrAudioBuffer->SetPlayoutSampleRate(0);
+  _ptrAudioBuffer->SetRecordingChannels(0);
+  _ptrAudioBuffer->SetPlayoutChannels(0);
 }
 
 // ----------------------------------------------------------------------------
 //  ActiveAudioLayer
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const
-{
-    audioLayer = AudioDeviceModule::kWindowsCoreAudio;
-    return 0;
+int32_t AudioDeviceWindowsCore::ActiveAudioLayer(
+    AudioDeviceModule::AudioLayer& audioLayer) const {
+  audioLayer = AudioDeviceModule::kWindowsCoreAudio;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -696,339 +646,297 @@
 //  Terminate
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::Terminate()
-{
+int32_t AudioDeviceWindowsCore::Terminate() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
-
-    if (!_initialized) {
-        return 0;
-    }
-
-    _initialized = false;
-    _speakerIsInitialized = false;
-    _microphoneIsInitialized = false;
-    _playing = false;
-    _recording = false;
-
-    SAFE_RELEASE(_ptrRenderCollection);
-    SAFE_RELEASE(_ptrCaptureCollection);
-    SAFE_RELEASE(_ptrDeviceOut);
-    SAFE_RELEASE(_ptrDeviceIn);
-    SAFE_RELEASE(_ptrClientOut);
-    SAFE_RELEASE(_ptrClientIn);
-    SAFE_RELEASE(_ptrRenderClient);
-    SAFE_RELEASE(_ptrCaptureClient);
-    SAFE_RELEASE(_ptrCaptureVolume);
-    SAFE_RELEASE(_ptrRenderSimpleVolume);
-
+  if (!_initialized) {
     return 0;
+  }
+
+  _initialized = false;
+  _speakerIsInitialized = false;
+  _microphoneIsInitialized = false;
+  _playing = false;
+  _recording = false;
+
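+  // Drop all cached COM interface pointers.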
+  SAFE_RELEASE(_ptrRenderCollection);
+  SAFE_RELEASE(_ptrCaptureCollection);
+  SAFE_RELEASE(_ptrDeviceOut);
+  SAFE_RELEASE(_ptrDeviceIn);
+  SAFE_RELEASE(_ptrClientOut);
+  SAFE_RELEASE(_ptrClientIn);
+  SAFE_RELEASE(_ptrRenderClient);
+  SAFE_RELEASE(_ptrCaptureClient);
+  SAFE_RELEASE(_ptrCaptureVolume);
+  SAFE_RELEASE(_ptrRenderSimpleVolume);
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  Initialized
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::Initialized() const
-{
-    return (_initialized);
+bool AudioDeviceWindowsCore::Initialized() const {
+  return (_initialized);
 }
 
 // ----------------------------------------------------------------------------
 //  InitSpeaker
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::InitSpeaker()
-{
+int32_t AudioDeviceWindowsCore::InitSpeaker() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_playing) {
+    return -1;
+  }
 
-    if (_playing)
-    {
-        return -1;
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  if (_usingOutputDeviceIndex) {
+    int16_t nDevices = PlayoutDevices();
+    if (_outputDeviceIndex > (nDevices - 1)) {
+      LOG(LS_ERROR) << "current device selection is invalid => unable to"
+                    << " initialize";
+      return -1;
     }
+  }
 
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
+  int32_t ret(0);
 
-    if (_usingOutputDeviceIndex)
-    {
-        int16_t nDevices = PlayoutDevices();
-        if (_outputDeviceIndex > (nDevices - 1))
-        {
-            LOG(LS_ERROR) << "current device selection is invalid => unable to"
-                          << " initialize";
-            return -1;
-        }
-    }
+  SAFE_RELEASE(_ptrDeviceOut);
+  if (_usingOutputDeviceIndex) {
+    // Refresh the selected rendering endpoint device using current index
+    ret = _GetListDevice(eRender, _outputDeviceIndex, &_ptrDeviceOut);
+  } else {
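+    // kDefaultDevice selects the WASAPI eConsole role; any other device type
+    // falls back to eCommunications (the default communication device).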
+    ERole role;
+    (_outputDevice == AudioDeviceModule::kDefaultDevice)
+        ? role = eConsole
+        : role = eCommunications;
+    // Refresh the selected rendering endpoint device using role
+    ret = _GetDefaultDevice(eRender, role, &_ptrDeviceOut);
+  }
 
-    int32_t ret(0);
-
+  if (ret != 0 || (_ptrDeviceOut == NULL)) {
+    LOG(LS_ERROR) << "failed to initialize the rendering enpoint device";
     SAFE_RELEASE(_ptrDeviceOut);
-    if (_usingOutputDeviceIndex)
-    {
-        // Refresh the selected rendering endpoint device using current index
-        ret = _GetListDevice(eRender, _outputDeviceIndex, &_ptrDeviceOut);
-    }
-    else
-    {
-        ERole role;
-        (_outputDevice == AudioDeviceModule::kDefaultDevice) ? role = eConsole : role = eCommunications;
-        // Refresh the selected rendering endpoint device using role
-        ret = _GetDefaultDevice(eRender, role, &_ptrDeviceOut);
-    }
+    return -1;
+  }
 
-    if (ret != 0 || (_ptrDeviceOut == NULL))
-    {
-        LOG(LS_ERROR) << "failed to initialize the rendering enpoint device";
-        SAFE_RELEASE(_ptrDeviceOut);
-        return -1;
-    }
-
-    IAudioSessionManager* pManager = NULL;
-    ret = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager),
-                                  CLSCTX_ALL,
-                                  NULL,
-                                  (void**)&pManager);
-    if (ret != 0 || pManager == NULL)
-    {
-        LOG(LS_ERROR) << "failed to initialize the render manager";
-        SAFE_RELEASE(pManager);
-        return -1;
-    }
-
-    SAFE_RELEASE(_ptrRenderSimpleVolume);
-    ret = pManager->GetSimpleAudioVolume(NULL, FALSE, &_ptrRenderSimpleVolume);
-    if (ret != 0 || _ptrRenderSimpleVolume == NULL)
-    {
-        LOG(LS_ERROR) << "failed to initialize the render simple volume";
-        SAFE_RELEASE(pManager);
-        SAFE_RELEASE(_ptrRenderSimpleVolume);
-        return -1;
-    }
+  IAudioSessionManager* pManager = NULL;
+  ret = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL,
+                                NULL, (void**)&pManager);
+  if (ret != 0 || pManager == NULL) {
+    LOG(LS_ERROR) << "failed to initialize the render manager";
     SAFE_RELEASE(pManager);
+    return -1;
+  }
 
-    _speakerIsInitialized = true;
+  SAFE_RELEASE(_ptrRenderSimpleVolume);
+  ret = pManager->GetSimpleAudioVolume(NULL, FALSE, &_ptrRenderSimpleVolume);
+  if (ret != 0 || _ptrRenderSimpleVolume == NULL) {
+    LOG(LS_ERROR) << "failed to initialize the render simple volume";
+    SAFE_RELEASE(pManager);
+    SAFE_RELEASE(_ptrRenderSimpleVolume);
+    return -1;
+  }
+  SAFE_RELEASE(pManager);
 
-    return 0;
+  _speakerIsInitialized = true;
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  InitMicrophone
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::InitMicrophone()
-{
+int32_t AudioDeviceWindowsCore::InitMicrophone() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_recording) {
+    return -1;
+  }
 
-    if (_recording)
-    {
-        return -1;
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
+
+  if (_usingInputDeviceIndex) {
+    int16_t nDevices = RecordingDevices();
+    if (_inputDeviceIndex > (nDevices - 1)) {
+      LOG(LS_ERROR) << "current device selection is invalid => unable to"
+                    << " initialize";
+      return -1;
     }
+  }
 
-    if (_ptrDeviceIn == NULL)
-    {
-        return -1;
-    }
+  int32_t ret(0);
 
-    if (_usingInputDeviceIndex)
-    {
-        int16_t nDevices = RecordingDevices();
-        if (_inputDeviceIndex > (nDevices - 1))
-        {
-            LOG(LS_ERROR) << "current device selection is invalid => unable to"
-                          << " initialize";
-            return -1;
-        }
-    }
+  SAFE_RELEASE(_ptrDeviceIn);
+  if (_usingInputDeviceIndex) {
+    // Refresh the selected capture endpoint device using current index
+    ret = _GetListDevice(eCapture, _inputDeviceIndex, &_ptrDeviceIn);
+  } else {
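+    // Same role mapping as for playout: kDefaultDevice -> eConsole, otherwise
+    // eCommunications.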
+    ERole role;
+    (_inputDevice == AudioDeviceModule::kDefaultDevice)
+        ? role = eConsole
+        : role = eCommunications;
+    // Refresh the selected capture endpoint device using role
+    ret = _GetDefaultDevice(eCapture, role, &_ptrDeviceIn);
+  }
 
-    int32_t ret(0);
-
+  if (ret != 0 || (_ptrDeviceIn == NULL)) {
+    LOG(LS_ERROR) << "failed to initialize the capturing enpoint device";
     SAFE_RELEASE(_ptrDeviceIn);
-    if (_usingInputDeviceIndex)
-    {
-        // Refresh the selected capture endpoint device using current index
-        ret = _GetListDevice(eCapture, _inputDeviceIndex, &_ptrDeviceIn);
-    }
-    else
-    {
-        ERole role;
-        (_inputDevice == AudioDeviceModule::kDefaultDevice) ? role = eConsole : role = eCommunications;
-        // Refresh the selected capture endpoint device using role
-        ret = _GetDefaultDevice(eCapture, role, &_ptrDeviceIn);
-    }
+    return -1;
+  }
 
-    if (ret != 0 || (_ptrDeviceIn == NULL))
-    {
-        LOG(LS_ERROR) << "failed to initialize the capturing enpoint device";
-        SAFE_RELEASE(_ptrDeviceIn);
-        return -1;
-    }
-
+  SAFE_RELEASE(_ptrCaptureVolume);
+  ret = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                               reinterpret_cast<void**>(&_ptrCaptureVolume));
+  if (ret != 0 || _ptrCaptureVolume == NULL) {
+    LOG(LS_ERROR) << "failed to initialize the capture volume";
     SAFE_RELEASE(_ptrCaptureVolume);
-    ret = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume),
-                                 CLSCTX_ALL,
-                                 NULL,
-                                 reinterpret_cast<void **>(&_ptrCaptureVolume));
-    if (ret != 0 || _ptrCaptureVolume == NULL)
-    {
-        LOG(LS_ERROR) << "failed to initialize the capture volume";
-        SAFE_RELEASE(_ptrCaptureVolume);
-        return -1;
-    }
+    return -1;
+  }
 
-    _microphoneIsInitialized = true;
+  _microphoneIsInitialized = true;
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SpeakerIsInitialized
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::SpeakerIsInitialized() const
-{
-
-    return (_speakerIsInitialized);
+bool AudioDeviceWindowsCore::SpeakerIsInitialized() const {
+  return (_speakerIsInitialized);
 }
 
 // ----------------------------------------------------------------------------
 //  MicrophoneIsInitialized
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::MicrophoneIsInitialized() const
-{
-
-    return (_microphoneIsInitialized);
+bool AudioDeviceWindowsCore::MicrophoneIsInitialized() const {
+  return (_microphoneIsInitialized);
 }
 
 // ----------------------------------------------------------------------------
 //  SpeakerVolumeIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SpeakerVolumeIsAvailable(bool& available)
-{
+int32_t AudioDeviceWindowsCore::SpeakerVolumeIsAvailable(bool& available) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioSessionManager* pManager = NULL;
+  ISimpleAudioVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioSessionManager* pManager = NULL;
-    ISimpleAudioVolume* pVolume = NULL;
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL, NULL,
+                               (void**)&pManager);
+  EXIT_ON_ERROR(hr);
 
-    hr = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL, NULL, (void**)&pManager);
-    EXIT_ON_ERROR(hr);
+  hr = pManager->GetSimpleAudioVolume(NULL, FALSE, &pVolume);
+  EXIT_ON_ERROR(hr);
 
-    hr = pManager->GetSimpleAudioVolume(NULL, FALSE, &pVolume);
-    EXIT_ON_ERROR(hr);
+  float volume(0.0f);
+  hr = pVolume->GetMasterVolume(&volume);
+  if (FAILED(hr)) {
+    available = false;
+  }
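+  // Note: a failed master-volume query does not mark the control as
+  // unavailable; |available| is set to true below in either case.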
+  available = true;
 
-    float volume(0.0f);
-    hr = pVolume->GetMasterVolume(&volume);
-    if (FAILED(hr))
-    {
-        available = false;
-    }
-    available = true;
+  SAFE_RELEASE(pManager);
+  SAFE_RELEASE(pVolume);
 
-    SAFE_RELEASE(pManager);
-    SAFE_RELEASE(pVolume);
-
-    return 0;
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pManager);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pManager);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SetSpeakerVolume
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetSpeakerVolume(uint32_t volume)
-{
+int32_t AudioDeviceWindowsCore::SetSpeakerVolume(uint32_t volume) {
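+  // State checks run under the API lock; the actual volume call below is
+  // serialized by |_volumeMutex| instead.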
+  {
+    rtc::CritScope lock(&_critSect);
 
-    {
-        rtc::CritScope lock(&_critSect);
-
-        if (!_speakerIsInitialized)
-        {
-        return -1;
-        }
-
-        if (_ptrDeviceOut == NULL)
-        {
-            return -1;
-        }
+    if (!_speakerIsInitialized) {
+      return -1;
     }
 
-    if (volume < (uint32_t)MIN_CORE_SPEAKER_VOLUME ||
-        volume > (uint32_t)MAX_CORE_SPEAKER_VOLUME)
-    {
-        return -1;
+    if (_ptrDeviceOut == NULL) {
+      return -1;
     }
+  }
 
-    HRESULT hr = S_OK;
+  if (volume < (uint32_t)MIN_CORE_SPEAKER_VOLUME ||
+      volume > (uint32_t)MAX_CORE_SPEAKER_VOLUME) {
+    return -1;
+  }
 
-    // scale input volume to valid range (0.0 to 1.0)
-    const float fLevel = (float)volume/MAX_CORE_SPEAKER_VOLUME;
-    _volumeMutex.Enter();
-    hr = _ptrRenderSimpleVolume->SetMasterVolume(fLevel,NULL);
-    _volumeMutex.Leave();
-    EXIT_ON_ERROR(hr);
+  HRESULT hr = S_OK;
 
-    return 0;
+  // scale input volume to valid range (0.0 to 1.0)
+  const float fLevel = (float)volume / MAX_CORE_SPEAKER_VOLUME;
+  _volumeMutex.Enter();
+  hr = _ptrRenderSimpleVolume->SetMasterVolume(fLevel, NULL);
+  _volumeMutex.Leave();
+  EXIT_ON_ERROR(hr);
+
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    return -1;
+  _TraceCOMError(hr);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SpeakerVolume
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SpeakerVolume(uint32_t& volume) const
-{
+int32_t AudioDeviceWindowsCore::SpeakerVolume(uint32_t& volume) const {
+  {
+    rtc::CritScope lock(&_critSect);
 
-    {
-        rtc::CritScope lock(&_critSect);
-
-        if (!_speakerIsInitialized)
-        {
-            return -1;
-        }
-
-        if (_ptrDeviceOut == NULL)
-        {
-            return -1;
-        }
+    if (!_speakerIsInitialized) {
+      return -1;
     }
 
-    HRESULT hr = S_OK;
-    float fLevel(0.0f);
+    if (_ptrDeviceOut == NULL) {
+      return -1;
+    }
+  }
 
-    _volumeMutex.Enter();
-    hr = _ptrRenderSimpleVolume->GetMasterVolume(&fLevel);
-    _volumeMutex.Leave();
-    EXIT_ON_ERROR(hr);
+  HRESULT hr = S_OK;
+  float fLevel(0.0f);
 
-    // scale input volume range [0.0,1.0] to valid output range
-    volume = static_cast<uint32_t> (fLevel*MAX_CORE_SPEAKER_VOLUME);
+  _volumeMutex.Enter();
+  hr = _ptrRenderSimpleVolume->GetMasterVolume(&fLevel);
+  _volumeMutex.Leave();
+  EXIT_ON_ERROR(hr);
 
-    return 0;
+  // scale input volume range [0.0,1.0] to valid output range
+  volume = static_cast<uint32_t>(fLevel * MAX_CORE_SPEAKER_VOLUME);
+
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    return -1;
+  _TraceCOMError(hr);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
@@ -1040,503 +948,451 @@
 //  how it is used today in VoE.
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MaxSpeakerVolume(uint32_t& maxVolume) const
-{
+int32_t AudioDeviceWindowsCore::MaxSpeakerVolume(uint32_t& maxVolume) const {
+  if (!_speakerIsInitialized) {
+    return -1;
+  }
 
-    if (!_speakerIsInitialized)
-    {
-        return -1;
-    }
+  maxVolume = static_cast<uint32_t>(MAX_CORE_SPEAKER_VOLUME);
 
-    maxVolume = static_cast<uint32_t> (MAX_CORE_SPEAKER_VOLUME);
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  MinSpeakerVolume
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MinSpeakerVolume(uint32_t& minVolume) const
-{
+int32_t AudioDeviceWindowsCore::MinSpeakerVolume(uint32_t& minVolume) const {
+  if (!_speakerIsInitialized) {
+    return -1;
+  }
 
-    if (!_speakerIsInitialized)
-    {
-        return -1;
-    }
+  minVolume = static_cast<uint32_t>(MIN_CORE_SPEAKER_VOLUME);
 
-    minVolume = static_cast<uint32_t> (MIN_CORE_SPEAKER_VOLUME);
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SpeakerMuteIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SpeakerMuteIsAvailable(bool& available)
-{
+int32_t AudioDeviceWindowsCore::SpeakerMuteIsAvailable(bool& available) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  // Query the speaker system mute state.
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                               reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    // Query the speaker system mute state.
-    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume),
-        CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  BOOL mute;
+  hr = pVolume->GetMute(&mute);
+  if (FAILED(hr))
+    available = false;
+  else
+    available = true;
 
-    BOOL mute;
-    hr = pVolume->GetMute(&mute);
-    if (FAILED(hr))
-        available = false;
-    else
-        available = true;
+  SAFE_RELEASE(pVolume);
 
-    SAFE_RELEASE(pVolume);
-
-    return 0;
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SetSpeakerMute
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetSpeakerMute(bool enable)
-{
+int32_t AudioDeviceWindowsCore::SetSpeakerMute(bool enable) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (!_speakerIsInitialized) {
+    return -1;
+  }
 
-    if (!_speakerIsInitialized)
-    {
-        return -1;
-    }
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  // Set the speaker system mute state.
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                               reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    // Set the speaker system mute state.
-    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  const BOOL mute(enable);
+  hr = pVolume->SetMute(mute, NULL);
+  EXIT_ON_ERROR(hr);
 
-    const BOOL mute(enable);
-    hr = pVolume->SetMute(mute, NULL);
-    EXIT_ON_ERROR(hr);
+  SAFE_RELEASE(pVolume);
 
-    SAFE_RELEASE(pVolume);
-
-    return 0;
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SpeakerMute
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SpeakerMute(bool& enabled) const
-{
+int32_t AudioDeviceWindowsCore::SpeakerMute(bool& enabled) const {
+  if (!_speakerIsInitialized) {
+    return -1;
+  }
 
-    if (!_speakerIsInitialized)
-    {
-        return -1;
-    }
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  // Query the speaker system mute state.
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                               reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    // Query the speaker system mute state.
-    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  BOOL mute;
+  hr = pVolume->GetMute(&mute);
+  EXIT_ON_ERROR(hr);
 
-    BOOL mute;
-    hr = pVolume->GetMute(&mute);
-    EXIT_ON_ERROR(hr);
+  enabled = (mute == TRUE) ? true : false;
 
-    enabled = (mute == TRUE) ? true : false;
+  SAFE_RELEASE(pVolume);
 
-    SAFE_RELEASE(pVolume);
-
-    return 0;
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  MicrophoneMuteIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MicrophoneMuteIsAvailable(bool& available)
-{
+int32_t AudioDeviceWindowsCore::MicrophoneMuteIsAvailable(bool& available) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceIn == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  // Query the microphone system mute state.
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                              reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    // Query the microphone system mute state.
-    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  BOOL mute;
+  hr = pVolume->GetMute(&mute);
+  if (FAILED(hr))
+    available = false;
+  else
+    available = true;
 
-    BOOL mute;
-    hr = pVolume->GetMute(&mute);
-    if (FAILED(hr))
-        available = false;
-    else
-        available = true;
-
-    SAFE_RELEASE(pVolume);
-    return 0;
+  SAFE_RELEASE(pVolume);
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SetMicrophoneMute
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetMicrophoneMute(bool enable)
-{
+int32_t AudioDeviceWindowsCore::SetMicrophoneMute(bool enable) {
+  if (!_microphoneIsInitialized) {
+    return -1;
+  }
 
-    if (!_microphoneIsInitialized)
-    {
-        return -1;
-    }
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceIn == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  // Set the microphone system mute state.
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                              reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    // Set the microphone system mute state.
-    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  const BOOL mute(enable);
+  hr = pVolume->SetMute(mute, NULL);
+  EXIT_ON_ERROR(hr);
 
-    const BOOL mute(enable);
-    hr = pVolume->SetMute(mute, NULL);
-    EXIT_ON_ERROR(hr);
-
-    SAFE_RELEASE(pVolume);
-    return 0;
+  SAFE_RELEASE(pVolume);
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  MicrophoneMute
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MicrophoneMute(bool& enabled) const
-{
+int32_t AudioDeviceWindowsCore::MicrophoneMute(bool& enabled) const {
+  if (!_microphoneIsInitialized) {
+    return -1;
+  }
 
-    if (!_microphoneIsInitialized)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  // Query the microphone system mute state.
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                              reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    // Query the microphone system mute state.
-    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  BOOL mute;
+  hr = pVolume->GetMute(&mute);
+  EXIT_ON_ERROR(hr);
 
-    BOOL mute;
-    hr = pVolume->GetMute(&mute);
-    EXIT_ON_ERROR(hr);
+  enabled = (mute == TRUE) ? true : false;
 
-    enabled = (mute == TRUE) ? true : false;
-
-    SAFE_RELEASE(pVolume);
-    return 0;
+  SAFE_RELEASE(pVolume);
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  StereoRecordingIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StereoRecordingIsAvailable(bool& available)
-{
-
-    available = true;
-    return 0;
+int32_t AudioDeviceWindowsCore::StereoRecordingIsAvailable(bool& available) {
+  available = true;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SetStereoRecording
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetStereoRecording(bool enable)
-{
+int32_t AudioDeviceWindowsCore::SetStereoRecording(bool enable) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (enable) {
+    _recChannelsPrioList[0] = 2;  // try stereo first
+    _recChannelsPrioList[1] = 1;
+    _recChannels = 2;
+  } else {
+    _recChannelsPrioList[0] = 1;  // try mono first
+    _recChannelsPrioList[1] = 2;
+    _recChannels = 1;
+  }
 
-    if (enable)
-    {
-        _recChannelsPrioList[0] = 2;    // try stereo first
-        _recChannelsPrioList[1] = 1;
-        _recChannels = 2;
-    }
-    else
-    {
-        _recChannelsPrioList[0] = 1;    // try mono first
-        _recChannelsPrioList[1] = 2;
-        _recChannels = 1;
-    }
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  StereoRecording
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StereoRecording(bool& enabled) const
-{
+int32_t AudioDeviceWindowsCore::StereoRecording(bool& enabled) const {
+  if (_recChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
 
-    if (_recChannels == 2)
-        enabled = true;
-    else
-        enabled = false;
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  StereoPlayoutIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StereoPlayoutIsAvailable(bool& available)
-{
-
-    available = true;
-    return 0;
+int32_t AudioDeviceWindowsCore::StereoPlayoutIsAvailable(bool& available) {
+  available = true;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SetStereoPlayout
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetStereoPlayout(bool enable)
-{
+int32_t AudioDeviceWindowsCore::SetStereoPlayout(bool enable) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (enable) {
+    _playChannelsPrioList[0] = 2;  // try stereo first
+    _playChannelsPrioList[1] = 1;
+    _playChannels = 2;
+  } else {
+    _playChannelsPrioList[0] = 1;  // try mono first
+    _playChannelsPrioList[1] = 2;
+    _playChannels = 1;
+  }
 
-    if (enable)
-    {
-        _playChannelsPrioList[0] = 2;    // try stereo first
-        _playChannelsPrioList[1] = 1;
-        _playChannels = 2;
-    }
-    else
-    {
-        _playChannelsPrioList[0] = 1;    // try mono first
-        _playChannelsPrioList[1] = 2;
-        _playChannels = 1;
-    }
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  StereoPlayout
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StereoPlayout(bool& enabled) const
-{
+int32_t AudioDeviceWindowsCore::StereoPlayout(bool& enabled) const {
+  if (_playChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
 
-    if (_playChannels == 2)
-        enabled = true;
-    else
-        enabled = false;
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SetAGC
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetAGC(bool enable)
-{
-    rtc::CritScope lock(&_critSect);
-    _AGC = enable;
-    return 0;
+int32_t AudioDeviceWindowsCore::SetAGC(bool enable) {
+  rtc::CritScope lock(&_critSect);
+  _AGC = enable;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  AGC
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::AGC() const
-{
-    rtc::CritScope lock(&_critSect);
-    return _AGC;
+bool AudioDeviceWindowsCore::AGC() const {
+  rtc::CritScope lock(&_critSect);
+  return _AGC;
 }
 
 // ----------------------------------------------------------------------------
 //  MicrophoneVolumeIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MicrophoneVolumeIsAvailable(bool& available)
-{
+int32_t AudioDeviceWindowsCore::MicrophoneVolumeIsAvailable(bool& available) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceIn == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                              reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  float volume(0.0f);
+  hr = pVolume->GetMasterVolumeLevelScalar(&volume);
+  if (FAILED(hr)) {
+    available = false;
+  }
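+  // As in SpeakerVolumeIsAvailable(), |available| ends up true regardless of
+  // whether the volume query succeeded.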
+  available = true;
 
-    float volume(0.0f);
-    hr = pVolume->GetMasterVolumeLevelScalar(&volume);
-    if (FAILED(hr))
-    {
-        available = false;
-    }
-    available = true;
-
-    SAFE_RELEASE(pVolume);
-    return 0;
+  SAFE_RELEASE(pVolume);
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SetMicrophoneVolume
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetMicrophoneVolume(uint32_t volume)
-{
-    LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::SetMicrophoneVolume(volume="
-                    << volume << ")";
+int32_t AudioDeviceWindowsCore::SetMicrophoneVolume(uint32_t volume) {
+  LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::SetMicrophoneVolume(volume="
+                  << volume << ")";
 
-    {
-        rtc::CritScope lock(&_critSect);
+  {
+    rtc::CritScope lock(&_critSect);
 
-        if (!_microphoneIsInitialized)
-        {
-            return -1;
-        }
-
-        if (_ptrDeviceIn == NULL)
-        {
-            return -1;
-        }
+    if (!_microphoneIsInitialized) {
+      return -1;
     }
 
-    if (volume < static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME) ||
-        volume > static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME))
-    {
-        return -1;
+    if (_ptrDeviceIn == NULL) {
+      return -1;
     }
+  }
 
-    HRESULT hr = S_OK;
-    // scale input volume to valid range (0.0 to 1.0)
-    const float fLevel = static_cast<float>(volume)/MAX_CORE_MICROPHONE_VOLUME;
-    _volumeMutex.Enter();
-    _ptrCaptureVolume->SetMasterVolumeLevelScalar(fLevel, NULL);
-    _volumeMutex.Leave();
-    EXIT_ON_ERROR(hr);
+  if (volume < static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME) ||
+      volume > static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME)) {
+    return -1;
+  }
 
-    return 0;
+  HRESULT hr = S_OK;
+  // scale input volume to valid range (0.0 to 1.0)
+  const float fLevel = static_cast<float>(volume) / MAX_CORE_MICROPHONE_VOLUME;
+  _volumeMutex.Enter();
+  hr = _ptrCaptureVolume->SetMasterVolumeLevelScalar(fLevel, NULL);
+  _volumeMutex.Leave();
+  EXIT_ON_ERROR(hr);
+
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    return -1;
+  _TraceCOMError(hr);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  MicrophoneVolume
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MicrophoneVolume(uint32_t& volume) const
-{
-    {
-        rtc::CritScope lock(&_critSect);
+int32_t AudioDeviceWindowsCore::MicrophoneVolume(uint32_t& volume) const {
+  {
+    rtc::CritScope lock(&_critSect);
 
-        if (!_microphoneIsInitialized)
-        {
-            return -1;
-        }
-
-        if (_ptrDeviceIn == NULL)
-        {
-            return -1;
-        }
+    if (!_microphoneIsInitialized) {
+      return -1;
     }
 
-    HRESULT hr = S_OK;
-    float fLevel(0.0f);
-    volume = 0;
-    _volumeMutex.Enter();
-    hr = _ptrCaptureVolume->GetMasterVolumeLevelScalar(&fLevel);
-    _volumeMutex.Leave();
-    EXIT_ON_ERROR(hr);
+    if (_ptrDeviceIn == NULL) {
+      return -1;
+    }
+  }
 
-    // scale input volume range [0.0,1.0] to valid output range
-    volume = static_cast<uint32_t> (fLevel*MAX_CORE_MICROPHONE_VOLUME);
+  HRESULT hr = S_OK;
+  float fLevel(0.0f);
+  volume = 0;
+  _volumeMutex.Enter();
+  hr = _ptrCaptureVolume->GetMasterVolumeLevelScalar(&fLevel);
+  _volumeMutex.Leave();
+  EXIT_ON_ERROR(hr);
 
-    return 0;
+  // scale input volume range [0.0,1.0] to valid output range
+  volume = static_cast<uint32_t>(fLevel * MAX_CORE_MICROPHONE_VOLUME);
+
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    return -1;
+  _TraceCOMError(hr);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
@@ -1548,166 +1404,142 @@
 //  how it is used today in VoE.
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MaxMicrophoneVolume(uint32_t& maxVolume) const
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    if (!_microphoneIsInitialized)
-    {
-        return -1;
-    }
+  if (!_microphoneIsInitialized) {
+    return -1;
+  }
 
-    maxVolume = static_cast<uint32_t> (MAX_CORE_MICROPHONE_VOLUME);
+  maxVolume = static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME);
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  MinMicrophoneVolume
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MinMicrophoneVolume(uint32_t& minVolume) const
-{
+int32_t AudioDeviceWindowsCore::MinMicrophoneVolume(uint32_t& minVolume) const {
+  if (!_microphoneIsInitialized) {
+    return -1;
+  }
 
-    if (!_microphoneIsInitialized)
-    {
-        return -1;
-    }
+  minVolume = static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME);
 
-    minVolume = static_cast<uint32_t> (MIN_CORE_MICROPHONE_VOLUME);
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  PlayoutDevices
 // ----------------------------------------------------------------------------
 
-int16_t AudioDeviceWindowsCore::PlayoutDevices()
-{
+int16_t AudioDeviceWindowsCore::PlayoutDevices() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_RefreshDeviceList(eRender) != -1) {
+    return (_DeviceListCount(eRender));
+  }
 
-    if (_RefreshDeviceList(eRender) != -1)
-    {
-        return (_DeviceListCount(eRender));
-    }
-
-    return -1;
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SetPlayoutDevice I (II)
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetPlayoutDevice(uint16_t index)
-{
+int32_t AudioDeviceWindowsCore::SetPlayoutDevice(uint16_t index) {
+  if (_playIsInitialized) {
+    return -1;
+  }
 
-    if (_playIsInitialized)
-    {
-        return -1;
-    }
+  // Get current number of available rendering endpoint devices and refresh the
+  // rendering collection.
+  UINT nDevices = PlayoutDevices();
 
-    // Get current number of available rendering endpoint devices and refresh the rendering collection.
-    UINT nDevices = PlayoutDevices();
+  if (index < 0 || index > (nDevices - 1)) {
+    LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                  << "]";
+    return -1;
+  }
 
-    if (index < 0 || index > (nDevices-1))
-    {
-        LOG(LS_ERROR) << "device index is out of range [0," << (nDevices-1)
-                      << "]";
-        return -1;
-    }
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  HRESULT hr(S_OK);
 
-    HRESULT hr(S_OK);
+  assert(_ptrRenderCollection != NULL);
 
-    assert(_ptrRenderCollection != NULL);
-
-    //  Select an endpoint rendering device given the specified index
+  //  Select an endpoint rendering device given the specified index
+  SAFE_RELEASE(_ptrDeviceOut);
+  hr = _ptrRenderCollection->Item(index, &_ptrDeviceOut);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(_ptrDeviceOut);
-    hr = _ptrRenderCollection->Item(
-                                 index,
-                                 &_ptrDeviceOut);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(_ptrDeviceOut);
-        return -1;
-    }
+    return -1;
+  }
 
-    WCHAR szDeviceName[MAX_PATH];
-    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
+  WCHAR szDeviceName[MAX_PATH];
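+  // Element count of the WCHAR buffer, not its size in bytes.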
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
 
-    // Get the endpoint device's friendly-name
-    if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0)
-    {
-        LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
-    }
+  // Get the endpoint device's friendly-name
+  if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0) {
+    LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+  }
 
-    _usingOutputDeviceIndex = true;
-    _outputDeviceIndex = index;
+  _usingOutputDeviceIndex = true;
+  _outputDeviceIndex = index;
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SetPlayoutDevice II (II)
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device)
-{
-    if (_playIsInitialized)
-    {
-        return -1;
-    }
+int32_t AudioDeviceWindowsCore::SetPlayoutDevice(
+    AudioDeviceModule::WindowsDeviceType device) {
+  if (_playIsInitialized) {
+    return -1;
+  }
 
-    ERole role(eCommunications);
+  ERole role(eCommunications);
 
-    if (device == AudioDeviceModule::kDefaultDevice)
-    {
-        role = eConsole;
-    }
-    else if (device == AudioDeviceModule::kDefaultCommunicationDevice)
-    {
-        role = eCommunications;
-    }
+  if (device == AudioDeviceModule::kDefaultDevice) {
+    role = eConsole;
+  } else if (device == AudioDeviceModule::kDefaultCommunicationDevice) {
+    role = eCommunications;
+  }
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    // Refresh the list of rendering endpoint devices
-    _RefreshDeviceList(eRender);
+  // Refresh the list of rendering endpoint devices
+  _RefreshDeviceList(eRender);
 
-    HRESULT hr(S_OK);
+  HRESULT hr(S_OK);
 
-    assert(_ptrEnumerator != NULL);
+  assert(_ptrEnumerator != NULL);
 
-    //  Select an endpoint rendering device given the specified role
+  //  Select an endpoint rendering device given the specified role
+  SAFE_RELEASE(_ptrDeviceOut);
+  hr = _ptrEnumerator->GetDefaultAudioEndpoint(eRender, role, &_ptrDeviceOut);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(_ptrDeviceOut);
-    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
-                           eRender,
-                           role,
-                           &_ptrDeviceOut);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(_ptrDeviceOut);
-        return -1;
-    }
+    return -1;
+  }
 
-    WCHAR szDeviceName[MAX_PATH];
-    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
 
-    // Get the endpoint device's friendly-name
-    if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0)
-    {
-        LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
-    }
+  // Get the endpoint device's friendly-name
+  if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0) {
+    LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+  }
 
-    _usingOutputDeviceIndex = false;
-    _outputDevice = device;
+  _usingOutputDeviceIndex = false;
+  _outputDevice = device;
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -1717,81 +1549,70 @@
 int32_t AudioDeviceWindowsCore::PlayoutDeviceName(
     uint16_t index,
     char name[kAdmMaxDeviceNameSize],
-    char guid[kAdmMaxGuidSize])
-{
+    char guid[kAdmMaxGuidSize]) {
+  bool defaultCommunicationDevice(false);
+  const int16_t nDevices(PlayoutDevices());  // also updates the list of devices
 
-    bool defaultCommunicationDevice(false);
-    const int16_t nDevices(PlayoutDevices());  // also updates the list of devices
+  // Special fix for the case when the user selects '-1' as index (<=> Default
+  // Communication Device)
+  if (index == (uint16_t)(-1)) {
+    defaultCommunicationDevice = true;
+    index = 0;
+    LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+  }
 
-    // Special fix for the case when the user selects '-1' as index (<=> Default Communication Device)
-    if (index == (uint16_t)(-1))
-    {
-        defaultCommunicationDevice = true;
-        index = 0;
-        LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
+
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  rtc::CritScope lock(&_critSect);
+
+  int32_t ret(-1);
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+  // Get the endpoint device's friendly-name
+  if (defaultCommunicationDevice) {
+    ret = _GetDefaultDeviceName(eRender, eCommunications, szDeviceName,
+                                bufferLen);
+  } else {
+    ret = _GetListDeviceName(eRender, index, szDeviceName, bufferLen);
+  }
+
+  if (ret == 0) {
+    // Convert the endpoint device's friendly-name to UTF-8
+    if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name,
+                            kAdmMaxDeviceNameSize, NULL, NULL) == 0) {
+      LOG(LS_ERROR) << "WideCharToMultiByte(CP_UTF8) failed with error code "
+                    << GetLastError();
     }
+  }
 
-    if ((index > (nDevices-1)) || (name == NULL))
-    {
-        return -1;
+  // Get the endpoint ID string (uniquely identifies the device among all audio
+  // endpoint devices)
+  if (defaultCommunicationDevice) {
+    ret =
+        _GetDefaultDeviceID(eRender, eCommunications, szDeviceName, bufferLen);
+  } else {
+    ret = _GetListDeviceID(eRender, index, szDeviceName, bufferLen);
+  }
+
+  if (guid != NULL && ret == 0) {
+    // Convert the endpoint device's ID string to UTF-8
+    if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize,
+                            NULL, NULL) == 0) {
+      LOG(LS_ERROR) << "WideCharToMultiByte(CP_UTF8) failed with error code "
+                    << GetLastError();
     }
+  }
 
-    memset(name, 0, kAdmMaxDeviceNameSize);
-
-    if (guid != NULL)
-    {
-        memset(guid, 0, kAdmMaxGuidSize);
-    }
-
-    rtc::CritScope lock(&_critSect);
-
-    int32_t ret(-1);
-    WCHAR szDeviceName[MAX_PATH];
-    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
-
-    // Get the endpoint device's friendly-name
-    if (defaultCommunicationDevice)
-    {
-        ret = _GetDefaultDeviceName(eRender, eCommunications, szDeviceName, bufferLen);
-    }
-    else
-    {
-        ret = _GetListDeviceName(eRender, index, szDeviceName, bufferLen);
-    }
-
-    if (ret == 0)
-    {
-        // Convert the endpoint device's friendly-name to UTF-8
-        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
-        {
-            LOG(LS_ERROR)
-                << "WideCharToMultiByte(CP_UTF8) failed with error code "
-                << GetLastError();
-        }
-    }
-
-    // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
-    if (defaultCommunicationDevice)
-    {
-        ret = _GetDefaultDeviceID(eRender, eCommunications, szDeviceName, bufferLen);
-    }
-    else
-    {
-        ret = _GetListDeviceID(eRender, index, szDeviceName, bufferLen);
-    }
-
-    if (guid != NULL && ret == 0)
-    {
-        // Convert the endpoint device's ID string to UTF-8
-        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
-        {
-            LOG(LS_ERROR)
-                << "WideCharToMultiByte(CP_UTF8) failed with error code "
-                << GetLastError();
-        }
-    }
-
-    return ret;
+  return ret;
 }
 
 // ----------------------------------------------------------------------------
@@ -1801,1244 +1622,1081 @@
 int32_t AudioDeviceWindowsCore::RecordingDeviceName(
     uint16_t index,
     char name[kAdmMaxDeviceNameSize],
-    char guid[kAdmMaxGuidSize])
-{
+    char guid[kAdmMaxGuidSize]) {
+  bool defaultCommunicationDevice(false);
+  const int16_t nDevices(
+      RecordingDevices());  // also updates the list of devices
 
-    bool defaultCommunicationDevice(false);
-    const int16_t nDevices(RecordingDevices());  // also updates the list of devices
+  // Special fix for the case when the user selects '-1' as index (<=> Default
+  // Communication Device)
+  if (index == (uint16_t)(-1)) {
+    defaultCommunicationDevice = true;
+    index = 0;
+    LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+  }
 
-    // Special fix for the case when the user selects '-1' as index (<=> Default Communication Device)
-    if (index == (uint16_t)(-1))
-    {
-        defaultCommunicationDevice = true;
-        index = 0;
-        LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
+
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  rtc::CritScope lock(&_critSect);
+
+  int32_t ret(-1);
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+  // Get the endpoint device's friendly-name
+  if (defaultCommunicationDevice) {
+    ret = _GetDefaultDeviceName(eCapture, eCommunications, szDeviceName,
+                                bufferLen);
+  } else {
+    ret = _GetListDeviceName(eCapture, index, szDeviceName, bufferLen);
+  }
+
+  if (ret == 0) {
+    // Convert the endpoint device's friendly-name to UTF-8
+    if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name,
+                            kAdmMaxDeviceNameSize, NULL, NULL) == 0) {
+      LOG(LS_ERROR) << "WideCharToMultiByte(CP_UTF8) failed with error code "
+                    << GetLastError();
     }
+  }
 
-    if ((index > (nDevices-1)) || (name == NULL))
-    {
-        return -1;
+  // Get the endpoint ID string (uniquely identifies the device among all audio
+  // endpoint devices)
+  if (defaultCommunicationDevice) {
+    ret =
+        _GetDefaultDeviceID(eCapture, eCommunications, szDeviceName, bufferLen);
+  } else {
+    ret = _GetListDeviceID(eCapture, index, szDeviceName, bufferLen);
+  }
+
+  if (guid != NULL && ret == 0) {
+    // Convert the endpoint device's ID string to UTF-8
+    if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize,
+                            NULL, NULL) == 0) {
+      LOG(LS_ERROR) << "WideCharToMultiByte(CP_UTF8) failed with error code "
+                    << GetLastError();
     }
+  }
 
-    memset(name, 0, kAdmMaxDeviceNameSize);
-
-    if (guid != NULL)
-    {
-        memset(guid, 0, kAdmMaxGuidSize);
-    }
-
-    rtc::CritScope lock(&_critSect);
-
-    int32_t ret(-1);
-    WCHAR szDeviceName[MAX_PATH];
-    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
-
-    // Get the endpoint device's friendly-name
-    if (defaultCommunicationDevice)
-    {
-        ret = _GetDefaultDeviceName(eCapture, eCommunications, szDeviceName, bufferLen);
-    }
-    else
-    {
-        ret = _GetListDeviceName(eCapture, index, szDeviceName, bufferLen);
-    }
-
-    if (ret == 0)
-    {
-        // Convert the endpoint device's friendly-name to UTF-8
-        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
-        {
-            LOG(LS_ERROR)
-                << "WideCharToMultiByte(CP_UTF8) failed with error code "
-                << GetLastError();
-        }
-    }
-
-    // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
-    if (defaultCommunicationDevice)
-    {
-        ret = _GetDefaultDeviceID(eCapture, eCommunications, szDeviceName, bufferLen);
-    }
-    else
-    {
-        ret = _GetListDeviceID(eCapture, index, szDeviceName, bufferLen);
-    }
-
-    if (guid != NULL && ret == 0)
-    {
-        // Convert the endpoint device's ID string to UTF-8
-        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
-        {
-            LOG(LS_ERROR)
-                << "WideCharToMultiByte(CP_UTF8) failed with error code "
-                << GetLastError();
-        }
-    }
-
-    return ret;
+  return ret;
 }
 
 // ----------------------------------------------------------------------------
 //  RecordingDevices
 // ----------------------------------------------------------------------------
 
-int16_t AudioDeviceWindowsCore::RecordingDevices()
-{
+int16_t AudioDeviceWindowsCore::RecordingDevices() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_RefreshDeviceList(eCapture) != -1) {
+    return (_DeviceListCount(eCapture));
+  }
 
-    if (_RefreshDeviceList(eCapture) != -1)
-    {
-        return (_DeviceListCount(eCapture));
-    }
-
-    return -1;
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SetRecordingDevice I (II)
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetRecordingDevice(uint16_t index)
-{
+int32_t AudioDeviceWindowsCore::SetRecordingDevice(uint16_t index) {
+  if (_recIsInitialized) {
+    return -1;
+  }
 
-    if (_recIsInitialized)
-    {
-        return -1;
-    }
+  // Get current number of available capture endpoint devices and refresh the
+  // capture collection.
+  UINT nDevices = RecordingDevices();
 
-    // Get current number of available capture endpoint devices and refresh the capture collection.
-    UINT nDevices = RecordingDevices();
+  if (index < 0 || index > (nDevices - 1)) {
+    LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                  << "]";
+    return -1;
+  }
 
-    if (index < 0 || index > (nDevices-1))
-    {
-        LOG(LS_ERROR) << "device index is out of range [0," << (nDevices-1)
-                      << "]";
-        return -1;
-    }
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  HRESULT hr(S_OK);
 
-    HRESULT hr(S_OK);
+  assert(_ptrCaptureCollection != NULL);
 
-    assert(_ptrCaptureCollection != NULL);
-
-    // Select an endpoint capture device given the specified index
+  // Select an endpoint capture device given the specified index
+  SAFE_RELEASE(_ptrDeviceIn);
+  hr = _ptrCaptureCollection->Item(index, &_ptrDeviceIn);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(_ptrDeviceIn);
-    hr = _ptrCaptureCollection->Item(
-                                 index,
-                                 &_ptrDeviceIn);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(_ptrDeviceIn);
-        return -1;
-    }
+    return -1;
+  }
 
-    WCHAR szDeviceName[MAX_PATH];
-    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
 
-    // Get the endpoint device's friendly-name
-    if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0)
-    {
-        LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
-    }
+  // Get the endpoint device's friendly-name
+  if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0) {
+    LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+  }
 
-    _usingInputDeviceIndex = true;
-    _inputDeviceIndex = index;
+  _usingInputDeviceIndex = true;
+  _inputDeviceIndex = index;
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SetRecordingDevice II (II)
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device)
-{
-    if (_recIsInitialized)
-    {
-        return -1;
-    }
+int32_t AudioDeviceWindowsCore::SetRecordingDevice(
+    AudioDeviceModule::WindowsDeviceType device) {
+  if (_recIsInitialized) {
+    return -1;
+  }
 
-    ERole role(eCommunications);
+  ERole role(eCommunications);
 
-    if (device == AudioDeviceModule::kDefaultDevice)
-    {
-        role = eConsole;
-    }
-    else if (device == AudioDeviceModule::kDefaultCommunicationDevice)
-    {
-        role = eCommunications;
-    }
+  if (device == AudioDeviceModule::kDefaultDevice) {
+    role = eConsole;
+  } else if (device == AudioDeviceModule::kDefaultCommunicationDevice) {
+    role = eCommunications;
+  }
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    // Refresh the list of capture endpoint devices
-    _RefreshDeviceList(eCapture);
+  // Refresh the list of capture endpoint devices
+  _RefreshDeviceList(eCapture);
 
-    HRESULT hr(S_OK);
+  HRESULT hr(S_OK);
 
-    assert(_ptrEnumerator != NULL);
+  assert(_ptrEnumerator != NULL);
 
-    //  Select an endpoint capture device given the specified role
+  //  Select an endpoint capture device given the specified role
+  SAFE_RELEASE(_ptrDeviceIn);
+  hr = _ptrEnumerator->GetDefaultAudioEndpoint(eCapture, role, &_ptrDeviceIn);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(_ptrDeviceIn);
-    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
-                           eCapture,
-                           role,
-                           &_ptrDeviceIn);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(_ptrDeviceIn);
-        return -1;
-    }
+    return -1;
+  }
 
-    WCHAR szDeviceName[MAX_PATH];
-    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName[0]);
 
-    // Get the endpoint device's friendly-name
-    if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0)
-    {
-        LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
-    }
+  // Get the endpoint device's friendly-name
+  if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0) {
+    LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+  }
 
-    _usingInputDeviceIndex = false;
-    _inputDevice = device;
+  _usingInputDeviceIndex = false;
+  _inputDevice = device;
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  PlayoutIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::PlayoutIsAvailable(bool& available)
-{
+int32_t AudioDeviceWindowsCore::PlayoutIsAvailable(bool& available) {
+  available = false;
 
-    available = false;
+  // Try to initialize the playout side
+  int32_t res = InitPlayout();
 
-    // Try to initialize the playout side
-    int32_t res = InitPlayout();
+  // Cancel effect of initialization
+  StopPlayout();
 
-    // Cancel effect of initialization
-    StopPlayout();
+  if (res != -1) {
+    available = true;
+  }
 
-    if (res != -1)
-    {
-        available = true;
-    }
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  RecordingIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::RecordingIsAvailable(bool& available)
-{
+int32_t AudioDeviceWindowsCore::RecordingIsAvailable(bool& available) {
+  available = false;
 
-    available = false;
+  // Try to initialize the recording side
+  int32_t res = InitRecording();
 
-    // Try to initialize the recording side
-    int32_t res = InitRecording();
+  // Cancel effect of initialization
+  StopRecording();
 
-    // Cancel effect of initialization
-    StopRecording();
+  if (res != -1) {
+    available = true;
+  }
 
-    if (res != -1)
-    {
-        available = true;
-    }
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  InitPlayout
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::InitPlayout()
-{
+int32_t AudioDeviceWindowsCore::InitPlayout() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_playing) {
+    return -1;
+  }
 
-    if (_playing)
-    {
-        return -1;
-    }
-
-    if (_playIsInitialized)
-    {
-        return 0;
-    }
-
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
-
-    // Initialize the speaker (devices might have been added or removed)
-    if (InitSpeaker() == -1)
-    {
-        LOG(LS_WARNING) << "InitSpeaker() failed";
-    }
-
-    // Ensure that the updated rendering endpoint device is valid
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
-
-    if (_builtInAecEnabled && _recIsInitialized)
-    {
-        // Ensure the correct render device is configured in case
-        // InitRecording() was called before InitPlayout().
-        if (SetDMOProperties() == -1)
-        {
-            return -1;
-        }
-    }
-
-    HRESULT hr = S_OK;
-    WAVEFORMATEX* pWfxOut = NULL;
-    WAVEFORMATEX Wfx = WAVEFORMATEX();
-    WAVEFORMATEX* pWfxClosestMatch = NULL;
-
-    // Create COM object with IAudioClient interface.
-    SAFE_RELEASE(_ptrClientOut);
-    hr = _ptrDeviceOut->Activate(
-                          __uuidof(IAudioClient),
-                          CLSCTX_ALL,
-                          NULL,
-                          (void**)&_ptrClientOut);
-    EXIT_ON_ERROR(hr);
-
-    // Retrieve the stream format that the audio engine uses for its internal
-    // processing (mixing) of shared-mode streams.
-    hr = _ptrClientOut->GetMixFormat(&pWfxOut);
-    if (SUCCEEDED(hr))
-    {
-        LOG(LS_VERBOSE) << "Audio Engine's current rendering mix format:";
-        // format type
-        LOG(LS_VERBOSE) << "wFormatTag     : 0x" << std::hex
-                        << pWfxOut->wFormatTag << std::dec << " ("
-                        << pWfxOut->wFormatTag << ")";
-        // number of channels (i.e. mono, stereo...)
-        LOG(LS_VERBOSE) << "nChannels      : " << pWfxOut->nChannels;
-        // sample rate
-        LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxOut->nSamplesPerSec;
-        // for buffer estimation
-        LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxOut->nAvgBytesPerSec;
-        // block size of data
-        LOG(LS_VERBOSE) << "nBlockAlign    : " << pWfxOut->nBlockAlign;
-        // number of bits per sample of mono data
-        LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxOut->wBitsPerSample;
-        LOG(LS_VERBOSE) << "cbSize         : " << pWfxOut->cbSize;
-    }
-
-    // Set wave format
-    Wfx.wFormatTag = WAVE_FORMAT_PCM;
-    Wfx.wBitsPerSample = 16;
-    Wfx.cbSize = 0;
-
-    const int freqs[] = {48000, 44100, 16000, 96000, 32000, 8000};
-    hr = S_FALSE;
-
-    // Iterate over frequencies and channels, in order of priority
-    for (unsigned int freq = 0; freq < sizeof(freqs)/sizeof(freqs[0]); freq++)
-    {
-        for (unsigned int chan = 0; chan < sizeof(_playChannelsPrioList)/sizeof(_playChannelsPrioList[0]); chan++)
-        {
-            Wfx.nChannels = _playChannelsPrioList[chan];
-            Wfx.nSamplesPerSec = freqs[freq];
-            Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
-            Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
-            // If the method succeeds and the audio endpoint device supports the specified stream format,
-            // it returns S_OK. If the method succeeds and provides a closest match to the specified format,
-            // it returns S_FALSE.
-            hr = _ptrClientOut->IsFormatSupported(
-                                  AUDCLNT_SHAREMODE_SHARED,
-                                  &Wfx,
-                                  &pWfxClosestMatch);
-            if (hr == S_OK)
-            {
-                break;
-            }
-            else
-            {
-                if (pWfxClosestMatch)
-                {
-                    LOG(INFO) << "nChannels=" << Wfx.nChannels <<
-                        ", nSamplesPerSec=" << Wfx.nSamplesPerSec <<
-                        " is not supported. Closest match: " <<
-                        "nChannels=" << pWfxClosestMatch->nChannels <<
-                        ", nSamplesPerSec=" << pWfxClosestMatch->nSamplesPerSec;
-                    CoTaskMemFree(pWfxClosestMatch);
-                    pWfxClosestMatch = NULL;
-                }
-                else
-                {
-                    LOG(INFO) << "nChannels=" << Wfx.nChannels <<
-                        ", nSamplesPerSec=" << Wfx.nSamplesPerSec <<
-                        " is not supported. No closest match.";
-                }
-            }
-        }
-        if (hr == S_OK)
-            break;
-    }
-
-    // TODO(andrew): what happens in the event of failure in the above loop?
-    //   Is _ptrClientOut->Initialize expected to fail?
-    //   Same in InitRecording().
-    if (hr == S_OK)
-    {
-        _playAudioFrameSize = Wfx.nBlockAlign;
-        // Block size in frames is the number of samples each channel in 10ms.
-        _playBlockSizeInFrames = Wfx.nSamplesPerSec / 100;
-        // Block size in samples is block size in frames times number of
-        // channels.
-        _playBlockSizeInSamples = _playBlockSizeInFrames * Wfx.nChannels;
-        _playSampleRate = Wfx.nSamplesPerSec;
-        _devicePlaySampleRate = Wfx.nSamplesPerSec; // The device itself continues to run at 44.1 kHz.
-        _devicePlayBlockSize = Wfx.nSamplesPerSec/100;
-        _playChannels = Wfx.nChannels;
-
-        LOG(LS_VERBOSE) << "VoE selected this rendering format:";
-        LOG(LS_VERBOSE) << "wFormatTag         : 0x" << std::hex
-                        << Wfx.wFormatTag << std::dec << " (" << Wfx.wFormatTag
-                        << ")";
-        LOG(LS_VERBOSE) << "nChannels          : " << Wfx.nChannels;
-        LOG(LS_VERBOSE) << "nSamplesPerSec     : " << Wfx.nSamplesPerSec;
-        LOG(LS_VERBOSE) << "nAvgBytesPerSec    : " << Wfx.nAvgBytesPerSec;
-        LOG(LS_VERBOSE) << "nBlockAlign        : " << Wfx.nBlockAlign;
-        LOG(LS_VERBOSE) << "wBitsPerSample     : " << Wfx.wBitsPerSample;
-        LOG(LS_VERBOSE) << "cbSize             : " << Wfx.cbSize;
-        LOG(LS_VERBOSE) << "Additional settings:";
-        LOG(LS_VERBOSE) << "_playAudioFrameSize: " << _playAudioFrameSize;
-        LOG(LS_VERBOSE) << "_playBlockSizeInFrames     : "
-                        << _playBlockSizeInFrames;
-        LOG(LS_VERBOSE) << "_playChannels      : " << _playChannels;
-    }
-
-    // Create a rendering stream.
-    //
-    // ****************************************************************************
-    // For a shared-mode stream that uses event-driven buffering, the caller must
-    // set both hnsPeriodicity and hnsBufferDuration to 0. The Initialize method
-    // determines how large a buffer to allocate based on the scheduling period
-    // of the audio engine. Although the client's buffer processing thread is
-    // event driven, the basic buffer management process, as described previously,
-    // is unaltered.
-    // Each time the thread awakens, it should call IAudioClient::GetCurrentPadding
-    // to determine how much data to write to a rendering buffer or read from a capture
-    // buffer. In contrast to the two buffers that the Initialize method allocates
-    // for an exclusive-mode stream that uses event-driven buffering, a shared-mode
-    // stream requires a single buffer.
-    // ****************************************************************************
-    //
-    REFERENCE_TIME hnsBufferDuration = 0;  // ask for minimum buffer size (default)
-    if (_devicePlaySampleRate == 44100)
-    {
-        // Ask for a larger buffer size (30ms) when using 44.1kHz as render rate.
-        // There seems to be a larger risk of underruns for 44.1 compared
-        // with the default rate (48kHz). When using default, we set the requested
-        // buffer duration to 0, which sets the buffer to the minimum size
-        // required by the engine thread. The actual buffer size can then be
-        // read by GetBufferSize() and it is 20ms on most machines.
-        hnsBufferDuration = 30*10000;
-    }
-    hr = _ptrClientOut->Initialize(
-                          AUDCLNT_SHAREMODE_SHARED,             // share Audio Engine with other applications
-                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK,    // processing of the audio buffer by the client will be event driven
-                          hnsBufferDuration,                    // requested buffer capacity as a time value (in 100-nanosecond units)
-                          0,                                    // periodicity
-                          &Wfx,                                 // selected wave format
-                          NULL);                                // session GUID
-
-    if (FAILED(hr))
-    {
-        LOG(LS_ERROR) << "IAudioClient::Initialize() failed:";
-    }
-    EXIT_ON_ERROR(hr);
-
-    if (_ptrAudioBuffer)
-    {
-        // Update the audio buffer with the selected parameters
-        _ptrAudioBuffer->SetPlayoutSampleRate(_playSampleRate);
-        _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
-    }
-    else
-    {
-        // We can enter this state during CoreAudioIsSupported() when no AudioDeviceImplementation
-        // has been created, hence the AudioDeviceBuffer does not exist.
-        // It is OK to end up here since we don't initiate any media in CoreAudioIsSupported().
-        LOG(LS_VERBOSE)
-            << "AudioDeviceBuffer must be attached before streaming can start";
-    }
-
-    // Get the actual size of the shared (endpoint buffer).
-    // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
-    UINT bufferFrameCount(0);
-    hr = _ptrClientOut->GetBufferSize(
-                          &bufferFrameCount);
-    if (SUCCEEDED(hr))
-    {
-        LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => "
-                        << bufferFrameCount << " (<=> "
-                        << bufferFrameCount*_playAudioFrameSize << " bytes)";
-    }
-
-    // Set the event handle that the system signals when an audio buffer is ready
-    // to be processed by the client.
-    hr = _ptrClientOut->SetEventHandle(
-                          _hRenderSamplesReadyEvent);
-    EXIT_ON_ERROR(hr);
-
-    // Get an IAudioRenderClient interface.
-    SAFE_RELEASE(_ptrRenderClient);
-    hr = _ptrClientOut->GetService(
-                          __uuidof(IAudioRenderClient),
-                          (void**)&_ptrRenderClient);
-    EXIT_ON_ERROR(hr);
-
-    // Mark playout side as initialized
-    _playIsInitialized = true;
-
-    CoTaskMemFree(pWfxOut);
-    CoTaskMemFree(pWfxClosestMatch);
-
-    LOG(LS_VERBOSE) << "render side is now initialized";
+  if (_playIsInitialized) {
     return 0;
+  }
+
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  // Initialize the speaker (devices might have been added or removed)
+  if (InitSpeaker() == -1) {
+    LOG(LS_WARNING) << "InitSpeaker() failed";
+  }
+
+  // Ensure that the updated rendering endpoint device is valid
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  if (_builtInAecEnabled && _recIsInitialized) {
+    // Ensure the correct render device is configured in case
+    // InitRecording() was called before InitPlayout().
+    if (SetDMOProperties() == -1) {
+      return -1;
+    }
+  }
+
+  HRESULT hr = S_OK;
+  WAVEFORMATEX* pWfxOut = NULL;
+  WAVEFORMATEX Wfx = WAVEFORMATEX();
+  WAVEFORMATEX* pWfxClosestMatch = NULL;
+
+  // Create COM object with IAudioClient interface.
+  SAFE_RELEASE(_ptrClientOut);
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL,
+                               (void**)&_ptrClientOut);
+  EXIT_ON_ERROR(hr);
+
+  // Retrieve the stream format that the audio engine uses for its internal
+  // processing (mixing) of shared-mode streams.
+  hr = _ptrClientOut->GetMixFormat(&pWfxOut);
+  if (SUCCEEDED(hr)) {
+    LOG(LS_VERBOSE) << "Audio Engine's current rendering mix format:";
+    // format type
+    LOG(LS_VERBOSE) << "wFormatTag     : 0x" << std::hex << pWfxOut->wFormatTag
+                    << std::dec << " (" << pWfxOut->wFormatTag << ")";
+    // number of channels (i.e. mono, stereo...)
+    LOG(LS_VERBOSE) << "nChannels      : " << pWfxOut->nChannels;
+    // sample rate
+    LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxOut->nSamplesPerSec;
+    // for buffer estimation
+    LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxOut->nAvgBytesPerSec;
+    // block size of data
+    LOG(LS_VERBOSE) << "nBlockAlign    : " << pWfxOut->nBlockAlign;
+    // number of bits per sample of mono data
+    LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxOut->wBitsPerSample;
+    LOG(LS_VERBOSE) << "cbSize         : " << pWfxOut->cbSize;
+  }
+
+  // Set wave format
+  Wfx.wFormatTag = WAVE_FORMAT_PCM;
+  Wfx.wBitsPerSample = 16;
+  Wfx.cbSize = 0;
+
+  const int freqs[] = {48000, 44100, 16000, 96000, 32000, 8000};
+  hr = S_FALSE;
+
+  // Iterate over frequencies and channels, in order of priority
+  for (unsigned int freq = 0; freq < sizeof(freqs) / sizeof(freqs[0]); freq++) {
+    for (unsigned int chan = 0; chan < sizeof(_playChannelsPrioList) /
+                                           sizeof(_playChannelsPrioList[0]);
+         chan++) {
+      Wfx.nChannels = _playChannelsPrioList[chan];
+      Wfx.nSamplesPerSec = freqs[freq];
+      Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
+      Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
+      // If the method succeeds and the audio endpoint device supports the
+      // specified stream format, it returns S_OK. If the method succeeds and
+      // provides a closest match to the specified format, it returns S_FALSE.
+      hr = _ptrClientOut->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &Wfx,
+                                            &pWfxClosestMatch);
+      if (hr == S_OK) {
+        break;
+      } else {
+        if (pWfxClosestMatch) {
+          LOG(INFO) << "nChannels=" << Wfx.nChannels
+                    << ", nSamplesPerSec=" << Wfx.nSamplesPerSec
+                    << " is not supported. Closest match: "
+                    << "nChannels=" << pWfxClosestMatch->nChannels
+                    << ", nSamplesPerSec=" << pWfxClosestMatch->nSamplesPerSec;
+          CoTaskMemFree(pWfxClosestMatch);
+          pWfxClosestMatch = NULL;
+        } else {
+          LOG(INFO) << "nChannels=" << Wfx.nChannels
+                    << ", nSamplesPerSec=" << Wfx.nSamplesPerSec
+                    << " is not supported. No closest match.";
+        }
+      }
+    }
+    if (hr == S_OK)
+      break;
+  }
+
+  // TODO(andrew): what happens in the event of failure in the above loop?
+  //   Is _ptrClientOut->Initialize expected to fail?
+  //   Same in InitRecording().
+  if (hr == S_OK) {
+    _playAudioFrameSize = Wfx.nBlockAlign;
+    // Block size in frames is the number of samples per channel in 10 ms.
+    _playBlockSizeInFrames = Wfx.nSamplesPerSec / 100;
+    // Block size in samples is block size in frames times number of
+    // channels.
+    _playBlockSizeInSamples = _playBlockSizeInFrames * Wfx.nChannels;
+    _playSampleRate = Wfx.nSamplesPerSec;
+    _devicePlaySampleRate =
+        Wfx.nSamplesPerSec;  // The device itself continues to run at 44.1 kHz.
+    _devicePlayBlockSize = Wfx.nSamplesPerSec / 100;
+    _playChannels = Wfx.nChannels;
+
+    LOG(LS_VERBOSE) << "VoE selected this rendering format:";
+    LOG(LS_VERBOSE) << "wFormatTag         : 0x" << std::hex << Wfx.wFormatTag
+                    << std::dec << " (" << Wfx.wFormatTag << ")";
+    LOG(LS_VERBOSE) << "nChannels          : " << Wfx.nChannels;
+    LOG(LS_VERBOSE) << "nSamplesPerSec     : " << Wfx.nSamplesPerSec;
+    LOG(LS_VERBOSE) << "nAvgBytesPerSec    : " << Wfx.nAvgBytesPerSec;
+    LOG(LS_VERBOSE) << "nBlockAlign        : " << Wfx.nBlockAlign;
+    LOG(LS_VERBOSE) << "wBitsPerSample     : " << Wfx.wBitsPerSample;
+    LOG(LS_VERBOSE) << "cbSize             : " << Wfx.cbSize;
+    LOG(LS_VERBOSE) << "Additional settings:";
+    LOG(LS_VERBOSE) << "_playAudioFrameSize: " << _playAudioFrameSize;
+    LOG(LS_VERBOSE) << "_playBlockSizeInFrames     : "
+                    << _playBlockSizeInFrames;
+    LOG(LS_VERBOSE) << "_playChannels      : " << _playChannels;
+  }
+
+  // Create a rendering stream.
+  //
+  // ****************************************************************************
+  // For a shared-mode stream that uses event-driven buffering, the caller must
+  // set both hnsPeriodicity and hnsBufferDuration to 0. The Initialize method
+  // determines how large a buffer to allocate based on the scheduling period
+  // of the audio engine. Although the client's buffer processing thread is
+  // event driven, the basic buffer management process, as described previously,
+  // is unaltered.
+  // Each time the thread awakens, it should call
+  // IAudioClient::GetCurrentPadding to determine how much data to write to a
+  // rendering buffer or read from a capture buffer. In contrast to the two
+  // buffers that the Initialize method allocates for an exclusive-mode stream
+  // that uses event-driven buffering, a shared-mode stream requires a single
+  // buffer.
+  // ****************************************************************************
+  //
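+  // Illustrative sketch (not part of this formatting CL): following the note
+  // above, an event-driven shared-mode render loop would typically query the
+  // current padding on every wakeup and only fill the space that is left in
+  // the single shared buffer (bufferFrameCount is obtained from
+  // GetBufferSize() further down), roughly:
+  //
+  //   UINT32 padding = 0;
+  //   if (SUCCEEDED(_ptrClientOut->GetCurrentPadding(&padding))) {
+  //     UINT32 framesAvailable = bufferFrameCount - padding;
+  //     BYTE* pData = NULL;
+  //     if (SUCCEEDED(_ptrRenderClient->GetBuffer(framesAvailable, &pData))) {
+  //       // Write |framesAvailable| frames of PCM into |pData| here.
+  //       _ptrRenderClient->ReleaseBuffer(framesAvailable, 0);
+  //     }
+  //   }
+  //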
+  REFERENCE_TIME hnsBufferDuration =
+      0;  // ask for minimum buffer size (default)
+  if (_devicePlaySampleRate == 44100) {
+    // Ask for a larger buffer size (30ms) when using 44.1kHz as render rate.
+    // There seems to be a larger risk of underruns for 44.1 compared
+    // with the default rate (48kHz). When using default, we set the requested
+    // buffer duration to 0, which sets the buffer to the minimum size
+    // required by the engine thread. The actual buffer size can then be
+    // read by GetBufferSize() and it is 20ms on most machines.
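+    // (REFERENCE_TIME is expressed in 100-nanosecond units, so the 30 ms
+    // request below is 30 * 10,000 = 300,000 units.)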
+    hnsBufferDuration = 30 * 10000;
+  }
+  hr = _ptrClientOut->Initialize(
+      AUDCLNT_SHAREMODE_SHARED,  // share Audio Engine with other applications
+      AUDCLNT_STREAMFLAGS_EVENTCALLBACK,  // processing of the audio buffer by
+                                          // the client will be event driven
+      hnsBufferDuration,  // requested buffer capacity as a time value (in
+                          // 100-nanosecond units)
+      0,                  // periodicity
+      &Wfx,               // selected wave format
+      NULL);              // session GUID
+
+  if (FAILED(hr)) {
+    LOG(LS_ERROR) << "IAudioClient::Initialize() failed:";
+  }
+  EXIT_ON_ERROR(hr);
+
+  if (_ptrAudioBuffer) {
+    // Update the audio buffer with the selected parameters
+    _ptrAudioBuffer->SetPlayoutSampleRate(_playSampleRate);
+    _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
+  } else {
+    // We can enter this state during CoreAudioIsSupported() when no
+    // AudioDeviceImplementation has been created, hence the AudioDeviceBuffer
+    // does not exist. It is OK to end up here since we don't initiate any media
+    // in CoreAudioIsSupported().
+    LOG(LS_VERBOSE)
+        << "AudioDeviceBuffer must be attached before streaming can start";
+  }
+
+  // Get the actual size of the shared (endpoint buffer).
+  // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
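+  // (20 ms * 48000 samples/s = 960 frames per channel.)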
+  UINT bufferFrameCount(0);
+  hr = _ptrClientOut->GetBufferSize(&bufferFrameCount);
+  if (SUCCEEDED(hr)) {
+    LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => " << bufferFrameCount
+                    << " (<=> " << bufferFrameCount * _playAudioFrameSize
+                    << " bytes)";
+  }
+
+  // Set the event handle that the system signals when an audio buffer is ready
+  // to be processed by the client.
+  hr = _ptrClientOut->SetEventHandle(_hRenderSamplesReadyEvent);
+  EXIT_ON_ERROR(hr);
+
+  // Get an IAudioRenderClient interface.
+  SAFE_RELEASE(_ptrRenderClient);
+  hr = _ptrClientOut->GetService(__uuidof(IAudioRenderClient),
+                                 (void**)&_ptrRenderClient);
+  EXIT_ON_ERROR(hr);
+
+  // Mark playout side as initialized
+  _playIsInitialized = true;
+
+  CoTaskMemFree(pWfxOut);
+  CoTaskMemFree(pWfxClosestMatch);
+
+  LOG(LS_VERBOSE) << "render side is now initialized";
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    CoTaskMemFree(pWfxOut);
-    CoTaskMemFree(pWfxClosestMatch);
-    SAFE_RELEASE(_ptrClientOut);
-    SAFE_RELEASE(_ptrRenderClient);
-    return -1;
+  _TraceCOMError(hr);
+  CoTaskMemFree(pWfxOut);
+  CoTaskMemFree(pWfxClosestMatch);
+  SAFE_RELEASE(_ptrClientOut);
+  SAFE_RELEASE(_ptrRenderClient);
+  return -1;
 }
 
 // Capture initialization when the built-in AEC DirectX Media Object (DMO) is
 // used. Called from InitRecording(), most of which is skipped over. The DMO
 // handles device initialization itself.
 // Reference: http://msdn.microsoft.com/en-us/library/ff819492(v=vs.85).aspx
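+// Note (illustrative, beyond the scope of this formatting CL): on this path
+// the capture thread presumably pulls AEC output by polling the DMO through
+// IMediaObject::ProcessOutput() (see WSAPICaptureThreadPollDMO, selected in
+// StartRecording() below) instead of reading from an IAudioCaptureClient.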
-int32_t AudioDeviceWindowsCore::InitRecordingDMO()
-{
-    assert(_builtInAecEnabled);
-    assert(_dmo != NULL);
+int32_t AudioDeviceWindowsCore::InitRecordingDMO() {
+  assert(_builtInAecEnabled);
+  assert(_dmo != NULL);
 
-    if (SetDMOProperties() == -1)
-    {
-        return -1;
-    }
+  if (SetDMOProperties() == -1) {
+    return -1;
+  }
 
-    DMO_MEDIA_TYPE mt = {0};
-    HRESULT hr = MoInitMediaType(&mt, sizeof(WAVEFORMATEX));
-    if (FAILED(hr))
-    {
-        MoFreeMediaType(&mt);
-        _TraceCOMError(hr);
-        return -1;
-    }
-    mt.majortype = MEDIATYPE_Audio;
-    mt.subtype = MEDIASUBTYPE_PCM;
-    mt.formattype = FORMAT_WaveFormatEx;
-
-    // Supported formats
-    // nChannels: 1 (in AEC-only mode)
-    // nSamplesPerSec: 8000, 11025, 16000, 22050
-    // wBitsPerSample: 16
-    WAVEFORMATEX* ptrWav = reinterpret_cast<WAVEFORMATEX*>(mt.pbFormat);
-    ptrWav->wFormatTag = WAVE_FORMAT_PCM;
-    ptrWav->nChannels = 1;
-    // 16000 is the highest we can support with our resampler.
-    ptrWav->nSamplesPerSec = 16000;
-    ptrWav->nAvgBytesPerSec = 32000;
-    ptrWav->nBlockAlign = 2;
-    ptrWav->wBitsPerSample = 16;
-    ptrWav->cbSize = 0;
-
-    // Set the VoE format equal to the AEC output format.
-    _recAudioFrameSize = ptrWav->nBlockAlign;
-    _recSampleRate = ptrWav->nSamplesPerSec;
-    _recBlockSize = ptrWav->nSamplesPerSec / 100;
-    _recChannels = ptrWav->nChannels;
-
-    // Set the DMO output format parameters.
-    hr = _dmo->SetOutputType(kAecCaptureStreamIndex, &mt, 0);
+  DMO_MEDIA_TYPE mt = {0};
+  HRESULT hr = MoInitMediaType(&mt, sizeof(WAVEFORMATEX));
+  if (FAILED(hr)) {
     MoFreeMediaType(&mt);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        return -1;
-    }
+    _TraceCOMError(hr);
+    return -1;
+  }
+  mt.majortype = MEDIATYPE_Audio;
+  mt.subtype = MEDIASUBTYPE_PCM;
+  mt.formattype = FORMAT_WaveFormatEx;
 
-    if (_ptrAudioBuffer)
-    {
-        _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
-        _ptrAudioBuffer->SetRecordingChannels(_recChannels);
-    }
-    else
-    {
-        // Refer to InitRecording() for comments.
-        LOG(LS_VERBOSE)
-            << "AudioDeviceBuffer must be attached before streaming can start";
-    }
+  // Supported formats
+  // nChannels: 1 (in AEC-only mode)
+  // nSamplesPerSec: 8000, 11025, 16000, 22050
+  // wBitsPerSample: 16
+  WAVEFORMATEX* ptrWav = reinterpret_cast<WAVEFORMATEX*>(mt.pbFormat);
+  ptrWav->wFormatTag = WAVE_FORMAT_PCM;
+  ptrWav->nChannels = 1;
+  // 16000 is the highest we can support with our resampler.
+  ptrWav->nSamplesPerSec = 16000;
+  ptrWav->nAvgBytesPerSec = 32000;
+  ptrWav->nBlockAlign = 2;
+  ptrWav->wBitsPerSample = 16;
+  ptrWav->cbSize = 0;
 
-    _mediaBuffer = new MediaBufferImpl(_recBlockSize * _recAudioFrameSize);
+  // Set the VoE format equal to the AEC output format.
+  _recAudioFrameSize = ptrWav->nBlockAlign;
+  _recSampleRate = ptrWav->nSamplesPerSec;
+  _recBlockSize = ptrWav->nSamplesPerSec / 100;
+  _recChannels = ptrWav->nChannels;
 
-    // Optional, but if called, must be after media types are set.
-    hr = _dmo->AllocateStreamingResources();
-    if (FAILED(hr))
-    {
-         _TraceCOMError(hr);
-        return -1;
-    }
+  // Set the DMO output format parameters.
+  hr = _dmo->SetOutputType(kAecCaptureStreamIndex, &mt, 0);
+  MoFreeMediaType(&mt);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
 
-    _recIsInitialized = true;
-    LOG(LS_VERBOSE) << "Capture side is now initialized";
+  if (_ptrAudioBuffer) {
+    _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
+    _ptrAudioBuffer->SetRecordingChannels(_recChannels);
+  } else {
+    // Refer to InitRecording() for comments.
+    LOG(LS_VERBOSE)
+        << "AudioDeviceBuffer must be attached before streaming can start";
+  }
 
-    return 0;
+  _mediaBuffer = new MediaBufferImpl(_recBlockSize * _recAudioFrameSize);
+
+  // Optional, but if called, must be after media types are set.
+  hr = _dmo->AllocateStreamingResources();
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
+
+  _recIsInitialized = true;
+  LOG(LS_VERBOSE) << "Capture side is now initialized";
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  InitRecording
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::InitRecording()
-{
+int32_t AudioDeviceWindowsCore::InitRecording() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_recording) {
+    return -1;
+  }
 
-    if (_recording)
-    {
-        return -1;
-    }
-
-    if (_recIsInitialized)
-    {
-        return 0;
-    }
-
-    if (QueryPerformanceFrequency(&_perfCounterFreq) == 0)
-    {
-        return -1;
-    }
-    _perfCounterFactor = 10000000.0 / (double)_perfCounterFreq.QuadPart;
-
-    if (_ptrDeviceIn == NULL)
-    {
-        return -1;
-    }
-
-    // Initialize the microphone (devices might have been added or removed)
-    if (InitMicrophone() == -1)
-    {
-        LOG(LS_WARNING) << "InitMicrophone() failed";
-    }
-
-    // Ensure that the updated capturing endpoint device is valid
-    if (_ptrDeviceIn == NULL)
-    {
-        return -1;
-    }
-
-    if (_builtInAecEnabled)
-    {
-        // The DMO will configure the capture device.
-        return InitRecordingDMO();
-    }
-
-    HRESULT hr = S_OK;
-    WAVEFORMATEX* pWfxIn = NULL;
-    WAVEFORMATEXTENSIBLE Wfx = WAVEFORMATEXTENSIBLE();
-    WAVEFORMATEX* pWfxClosestMatch = NULL;
-
-    // Create COM object with IAudioClient interface.
-    SAFE_RELEASE(_ptrClientIn);
-    hr = _ptrDeviceIn->Activate(
-                          __uuidof(IAudioClient),
-                          CLSCTX_ALL,
-                          NULL,
-                          (void**)&_ptrClientIn);
-    EXIT_ON_ERROR(hr);
-
-    // Retrieve the stream format that the audio engine uses for its internal
-    // processing (mixing) of shared-mode streams.
-    hr = _ptrClientIn->GetMixFormat(&pWfxIn);
-    if (SUCCEEDED(hr))
-    {
-        LOG(LS_VERBOSE) << "Audio Engine's current capturing mix format:";
-        // format type
-        LOG(LS_VERBOSE) << "wFormatTag     : 0x" << std::hex
-                        << pWfxIn->wFormatTag << std::dec << " ("
-                        << pWfxIn->wFormatTag << ")";
-        // number of channels (i.e. mono, stereo...)
-        LOG(LS_VERBOSE) << "nChannels      : " << pWfxIn->nChannels;
-        // sample rate
-        LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxIn->nSamplesPerSec;
-        // for buffer estimation
-        LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxIn->nAvgBytesPerSec;
-        // block size of data
-        LOG(LS_VERBOSE) << "nBlockAlign    : " << pWfxIn->nBlockAlign;
-        // number of bits per sample of mono data
-        LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxIn->wBitsPerSample;
-        LOG(LS_VERBOSE) << "cbSize         : " << pWfxIn->cbSize;
-    }
-
-    // Set wave format
-    Wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
-    Wfx.Format.wBitsPerSample = 16;
-    Wfx.Format.cbSize = 22;
-    Wfx.dwChannelMask = 0;
-    Wfx.Samples.wValidBitsPerSample = Wfx.Format.wBitsPerSample;
-    Wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
-
-    const int freqs[6] = {48000, 44100, 16000, 96000, 32000, 8000};
-    hr = S_FALSE;
-
-    // Iterate over frequencies and channels, in order of priority
-    for (unsigned int freq = 0; freq < sizeof(freqs)/sizeof(freqs[0]); freq++)
-    {
-        for (unsigned int chan = 0; chan < sizeof(_recChannelsPrioList)/sizeof(_recChannelsPrioList[0]); chan++)
-        {
-            Wfx.Format.nChannels = _recChannelsPrioList[chan];
-            Wfx.Format.nSamplesPerSec = freqs[freq];
-            Wfx.Format.nBlockAlign = Wfx.Format.nChannels *
-                                     Wfx.Format.wBitsPerSample / 8;
-            Wfx.Format.nAvgBytesPerSec = Wfx.Format.nSamplesPerSec *
-                                         Wfx.Format.nBlockAlign;
-            // If the method succeeds and the audio endpoint device supports the specified stream format,
-            // it returns S_OK. If the method succeeds and provides a closest match to the specified format,
-            // it returns S_FALSE.
-            hr = _ptrClientIn->IsFormatSupported(
-                                  AUDCLNT_SHAREMODE_SHARED,
-                                  (WAVEFORMATEX*)&Wfx,
-                                  &pWfxClosestMatch);
-            if (hr == S_OK)
-            {
-                break;
-            }
-            else
-            {
-                if (pWfxClosestMatch)
-                {
-                    LOG(INFO) << "nChannels=" << Wfx.Format.nChannels <<
-                        ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec <<
-                        " is not supported. Closest match: " <<
-                        "nChannels=" << pWfxClosestMatch->nChannels <<
-                        ", nSamplesPerSec=" << pWfxClosestMatch->nSamplesPerSec;
-                    CoTaskMemFree(pWfxClosestMatch);
-                    pWfxClosestMatch = NULL;
-                }
-                else
-                {
-                    LOG(INFO) << "nChannels=" << Wfx.Format.nChannels <<
-                        ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec <<
-                        " is not supported. No closest match.";
-                }
-            }
-        }
-        if (hr == S_OK)
-            break;
-    }
-
-    if (hr == S_OK)
-    {
-        _recAudioFrameSize = Wfx.Format.nBlockAlign;
-        _recSampleRate = Wfx.Format.nSamplesPerSec;
-        _recBlockSize = Wfx.Format.nSamplesPerSec/100;
-        _recChannels = Wfx.Format.nChannels;
-
-        LOG(LS_VERBOSE) << "VoE selected this capturing format:";
-        LOG(LS_VERBOSE) << "wFormatTag        : 0x" << std::hex
-                        << Wfx.Format.wFormatTag << std::dec
-                        << " (" << Wfx.Format.wFormatTag << ")";
-        LOG(LS_VERBOSE) << "nChannels         : " << Wfx.Format.nChannels;
-        LOG(LS_VERBOSE) << "nSamplesPerSec    : " << Wfx.Format.nSamplesPerSec;
-        LOG(LS_VERBOSE) << "nAvgBytesPerSec   : " << Wfx.Format.nAvgBytesPerSec;
-        LOG(LS_VERBOSE) << "nBlockAlign       : " << Wfx.Format.nBlockAlign;
-        LOG(LS_VERBOSE) << "wBitsPerSample    : " << Wfx.Format.wBitsPerSample;
-        LOG(LS_VERBOSE) << "cbSize            : " << Wfx.Format.cbSize;
-        LOG(LS_VERBOSE) << "Additional settings:";
-        LOG(LS_VERBOSE) << "_recAudioFrameSize: " << _recAudioFrameSize;
-        LOG(LS_VERBOSE) << "_recBlockSize     : " << _recBlockSize;
-        LOG(LS_VERBOSE) << "_recChannels      : " << _recChannels;
-    }
-
-    // Create a capturing stream.
-    hr = _ptrClientIn->Initialize(
-                          AUDCLNT_SHAREMODE_SHARED,             // share Audio Engine with other applications
-                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK |   // processing of the audio buffer by the client will be event driven
-                          AUDCLNT_STREAMFLAGS_NOPERSIST,        // volume and mute settings for an audio session will not persist across system restarts
-                          0,                                    // required for event-driven shared mode
-                          0,                                    // periodicity
-                          (WAVEFORMATEX*)&Wfx,                  // selected wave format
-                          NULL);                                // session GUID
-
-
-    if (hr != S_OK)
-    {
-        LOG(LS_ERROR) << "IAudioClient::Initialize() failed:";
-    }
-    EXIT_ON_ERROR(hr);
-
-    if (_ptrAudioBuffer)
-    {
-        // Update the audio buffer with the selected parameters
-        _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
-        _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
-    }
-    else
-    {
-        // We can enter this state during CoreAudioIsSupported() when no AudioDeviceImplementation
-        // has been created, hence the AudioDeviceBuffer does not exist.
-        // It is OK to end up here since we don't initiate any media in CoreAudioIsSupported().
-        LOG(LS_VERBOSE)
-            << "AudioDeviceBuffer must be attached before streaming can start";
-    }
-
-    // Get the actual size of the shared (endpoint buffer).
-    // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
-    UINT bufferFrameCount(0);
-    hr = _ptrClientIn->GetBufferSize(
-                          &bufferFrameCount);
-    if (SUCCEEDED(hr))
-    {
-        LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => "
-                        << bufferFrameCount << " (<=> "
-                        << bufferFrameCount*_recAudioFrameSize << " bytes)";
-    }
-
-    // Set the event handle that the system signals when an audio buffer is ready
-    // to be processed by the client.
-    hr = _ptrClientIn->SetEventHandle(
-                          _hCaptureSamplesReadyEvent);
-    EXIT_ON_ERROR(hr);
-
-    // Get an IAudioCaptureClient interface.
-    SAFE_RELEASE(_ptrCaptureClient);
-    hr = _ptrClientIn->GetService(
-                          __uuidof(IAudioCaptureClient),
-                          (void**)&_ptrCaptureClient);
-    EXIT_ON_ERROR(hr);
-
-    // Mark capture side as initialized
-    _recIsInitialized = true;
-
-    CoTaskMemFree(pWfxIn);
-    CoTaskMemFree(pWfxClosestMatch);
-
-    LOG(LS_VERBOSE) << "capture side is now initialized";
+  if (_recIsInitialized) {
     return 0;
+  }
+
+  if (QueryPerformanceFrequency(&_perfCounterFreq) == 0) {
+    return -1;
+  }
+  _perfCounterFactor = 10000000.0 / (double)_perfCounterFreq.QuadPart;
+
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
+
+  // Initialize the microphone (devices might have been added or removed)
+  if (InitMicrophone() == -1) {
+    LOG(LS_WARNING) << "InitMicrophone() failed";
+  }
+
+  // Ensure that the updated capturing endpoint device is valid
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
+
+  if (_builtInAecEnabled) {
+    // The DMO will configure the capture device.
+    return InitRecordingDMO();
+  }
+
+  HRESULT hr = S_OK;
+  WAVEFORMATEX* pWfxIn = NULL;
+  WAVEFORMATEXTENSIBLE Wfx = WAVEFORMATEXTENSIBLE();
+  WAVEFORMATEX* pWfxClosestMatch = NULL;
+
+  // Create COM object with IAudioClient interface.
+  SAFE_RELEASE(_ptrClientIn);
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL,
+                              (void**)&_ptrClientIn);
+  EXIT_ON_ERROR(hr);
+
+  // Retrieve the stream format that the audio engine uses for its internal
+  // processing (mixing) of shared-mode streams.
+  hr = _ptrClientIn->GetMixFormat(&pWfxIn);
+  if (SUCCEEDED(hr)) {
+    LOG(LS_VERBOSE) << "Audio Engine's current capturing mix format:";
+    // format type
+    LOG(LS_VERBOSE) << "wFormatTag     : 0x" << std::hex << pWfxIn->wFormatTag
+                    << std::dec << " (" << pWfxIn->wFormatTag << ")";
+    // number of channels (i.e. mono, stereo...)
+    LOG(LS_VERBOSE) << "nChannels      : " << pWfxIn->nChannels;
+    // sample rate
+    LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxIn->nSamplesPerSec;
+    // for buffer estimation
+    LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxIn->nAvgBytesPerSec;
+    // block size of data
+    LOG(LS_VERBOSE) << "nBlockAlign    : " << pWfxIn->nBlockAlign;
+    // number of bits per sample of mono data
+    LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxIn->wBitsPerSample;
+    LOG(LS_VERBOSE) << "cbSize         : " << pWfxIn->cbSize;
+  }
+
+  // Set wave format
+  Wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+  Wfx.Format.wBitsPerSample = 16;
+  Wfx.Format.cbSize = 22;
+  Wfx.dwChannelMask = 0;
+  Wfx.Samples.wValidBitsPerSample = Wfx.Format.wBitsPerSample;
+  Wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+
+  const int freqs[6] = {48000, 44100, 16000, 96000, 32000, 8000};
+  hr = S_FALSE;
+
+  // Iterate over frequencies and channels, in order of priority
+  for (unsigned int freq = 0; freq < sizeof(freqs) / sizeof(freqs[0]); freq++) {
+    for (unsigned int chan = 0;
+         chan < sizeof(_recChannelsPrioList) / sizeof(_recChannelsPrioList[0]);
+         chan++) {
+      Wfx.Format.nChannels = _recChannelsPrioList[chan];
+      Wfx.Format.nSamplesPerSec = freqs[freq];
+      Wfx.Format.nBlockAlign =
+          Wfx.Format.nChannels * Wfx.Format.wBitsPerSample / 8;
+      Wfx.Format.nAvgBytesPerSec =
+          Wfx.Format.nSamplesPerSec * Wfx.Format.nBlockAlign;
+      // If the method succeeds and the audio endpoint device supports the
+      // specified stream format, it returns S_OK. If the method succeeds and
+      // provides a closest match to the specified format, it returns S_FALSE.
+      hr = _ptrClientIn->IsFormatSupported(
+          AUDCLNT_SHAREMODE_SHARED, (WAVEFORMATEX*)&Wfx, &pWfxClosestMatch);
+      if (hr == S_OK) {
+        break;
+      } else {
+        if (pWfxClosestMatch) {
+          LOG(INFO) << "nChannels=" << Wfx.Format.nChannels
+                    << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec
+                    << " is not supported. Closest match: "
+                    << "nChannels=" << pWfxClosestMatch->nChannels
+                    << ", nSamplesPerSec=" << pWfxClosestMatch->nSamplesPerSec;
+          CoTaskMemFree(pWfxClosestMatch);
+          pWfxClosestMatch = NULL;
+        } else {
+          LOG(INFO) << "nChannels=" << Wfx.Format.nChannels
+                    << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec
+                    << " is not supported. No closest match.";
+        }
+      }
+    }
+    if (hr == S_OK)
+      break;
+  }
+
+  if (hr == S_OK) {
+    _recAudioFrameSize = Wfx.Format.nBlockAlign;
+    _recSampleRate = Wfx.Format.nSamplesPerSec;
+    _recBlockSize = Wfx.Format.nSamplesPerSec / 100;
+    _recChannels = Wfx.Format.nChannels;
+
+    LOG(LS_VERBOSE) << "VoE selected this capturing format:";
+    LOG(LS_VERBOSE) << "wFormatTag        : 0x" << std::hex
+                    << Wfx.Format.wFormatTag << std::dec << " ("
+                    << Wfx.Format.wFormatTag << ")";
+    LOG(LS_VERBOSE) << "nChannels         : " << Wfx.Format.nChannels;
+    LOG(LS_VERBOSE) << "nSamplesPerSec    : " << Wfx.Format.nSamplesPerSec;
+    LOG(LS_VERBOSE) << "nAvgBytesPerSec   : " << Wfx.Format.nAvgBytesPerSec;
+    LOG(LS_VERBOSE) << "nBlockAlign       : " << Wfx.Format.nBlockAlign;
+    LOG(LS_VERBOSE) << "wBitsPerSample    : " << Wfx.Format.wBitsPerSample;
+    LOG(LS_VERBOSE) << "cbSize            : " << Wfx.Format.cbSize;
+    LOG(LS_VERBOSE) << "Additional settings:";
+    LOG(LS_VERBOSE) << "_recAudioFrameSize: " << _recAudioFrameSize;
+    LOG(LS_VERBOSE) << "_recBlockSize     : " << _recBlockSize;
+    LOG(LS_VERBOSE) << "_recChannels      : " << _recChannels;
+  }
+
+  // Create a capturing stream.
+  hr = _ptrClientIn->Initialize(
+      AUDCLNT_SHAREMODE_SHARED,  // share Audio Engine with other applications
+      AUDCLNT_STREAMFLAGS_EVENTCALLBACK |  // processing of the audio buffer by
+                                           // the client will be event driven
+          AUDCLNT_STREAMFLAGS_NOPERSIST,   // volume and mute settings for an
+                                           // audio session will not persist
+                                           // across system restarts
+      0,                    // required for event-driven shared mode
+      0,                    // periodicity
+      (WAVEFORMATEX*)&Wfx,  // selected wave format
+      NULL);                // session GUID
+
+  if (hr != S_OK) {
+    LOG(LS_ERROR) << "IAudioClient::Initialize() failed:";
+  }
+  EXIT_ON_ERROR(hr);
+
+  if (_ptrAudioBuffer) {
+    // Update the audio buffer with the selected parameters
+    _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
+    _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
+  } else {
+    // We can enter this state during CoreAudioIsSupported() when no
+    // AudioDeviceImplementation has been created, hence the AudioDeviceBuffer
+    // does not exist. It is OK to end up here since we don't initiate any media
+    // in CoreAudioIsSupported().
+    LOG(LS_VERBOSE)
+        << "AudioDeviceBuffer must be attached before streaming can start";
+  }
+
+  // Get the actual size of the shared (endpoint buffer).
+  // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
+  UINT bufferFrameCount(0);
+  hr = _ptrClientIn->GetBufferSize(&bufferFrameCount);
+  if (SUCCEEDED(hr)) {
+    LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => " << bufferFrameCount
+                    << " (<=> " << bufferFrameCount * _recAudioFrameSize
+                    << " bytes)";
+  }
+
+  // Set the event handle that the system signals when an audio buffer is ready
+  // to be processed by the client.
+  hr = _ptrClientIn->SetEventHandle(_hCaptureSamplesReadyEvent);
+  EXIT_ON_ERROR(hr);
+
+  // Get an IAudioCaptureClient interface.
+  SAFE_RELEASE(_ptrCaptureClient);
+  hr = _ptrClientIn->GetService(__uuidof(IAudioCaptureClient),
+                                (void**)&_ptrCaptureClient);
+  EXIT_ON_ERROR(hr);
+
+  // Mark capture side as initialized
+  _recIsInitialized = true;
+
+  CoTaskMemFree(pWfxIn);
+  CoTaskMemFree(pWfxClosestMatch);
+
+  LOG(LS_VERBOSE) << "capture side is now initialized";
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    CoTaskMemFree(pWfxIn);
-    CoTaskMemFree(pWfxClosestMatch);
-    SAFE_RELEASE(_ptrClientIn);
-    SAFE_RELEASE(_ptrCaptureClient);
-    return -1;
+  _TraceCOMError(hr);
+  CoTaskMemFree(pWfxIn);
+  CoTaskMemFree(pWfxClosestMatch);
+  SAFE_RELEASE(_ptrClientIn);
+  SAFE_RELEASE(_ptrCaptureClient);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  StartRecording
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StartRecording()
-{
+int32_t AudioDeviceWindowsCore::StartRecording() {
+  if (!_recIsInitialized) {
+    return -1;
+  }
 
-    if (!_recIsInitialized)
-    {
-        return -1;
-    }
-
-    if (_hRecThread != NULL)
-    {
-        return 0;
-    }
-
-    if (_recording)
-    {
-        return 0;
-    }
-
-    {
-        rtc::CritScope critScoped(&_critSect);
-
-        // Create thread which will drive the capturing
-        LPTHREAD_START_ROUTINE lpStartAddress = WSAPICaptureThread;
-        if (_builtInAecEnabled)
-        {
-            // Redirect to the DMO polling method.
-            lpStartAddress = WSAPICaptureThreadPollDMO;
-
-            if (!_playing)
-            {
-                // The DMO won't provide us captured output data unless we
-                // give it render data to process.
-                LOG(LS_ERROR)
-                    << "Playout must be started before recording when using"
-                    << " the built-in AEC";
-                return -1;
-            }
-        }
-
-        assert(_hRecThread == NULL);
-        _hRecThread = CreateThread(NULL,
-                                   0,
-                                   lpStartAddress,
-                                   this,
-                                   0,
-                                   NULL);
-        if (_hRecThread == NULL)
-        {
-            LOG(LS_ERROR) << "failed to create the recording thread";
-            return -1;
-        }
-
-        // Set thread priority to highest possible
-        SetThreadPriority(_hRecThread, THREAD_PRIORITY_TIME_CRITICAL);
-
-        assert(_hGetCaptureVolumeThread == NULL);
-        _hGetCaptureVolumeThread = CreateThread(NULL,
-                                                0,
-                                                GetCaptureVolumeThread,
-                                                this,
-                                                0,
-                                                NULL);
-        if (_hGetCaptureVolumeThread == NULL)
-        {
-            LOG(LS_ERROR) << "failed to create the volume getter thread";
-            return -1;
-        }
-
-        assert(_hSetCaptureVolumeThread == NULL);
-        _hSetCaptureVolumeThread = CreateThread(NULL,
-                                                0,
-                                                SetCaptureVolumeThread,
-                                                this,
-                                                0,
-                                                NULL);
-        if (_hSetCaptureVolumeThread == NULL)
-        {
-            LOG(LS_ERROR) << "failed to create the volume setter thread";
-            return -1;
-        }
-    }  // critScoped
-
-    DWORD ret = WaitForSingleObject(_hCaptureStartedEvent, 1000);
-    if (ret != WAIT_OBJECT_0)
-    {
-        LOG(LS_VERBOSE) << "capturing did not start up properly";
-        return -1;
-    }
-    LOG(LS_VERBOSE) << "capture audio stream has now started...";
-
-    _recording = true;
-
+  if (_hRecThread != NULL) {
     return 0;
+  }
+
+  if (_recording) {
+    return 0;
+  }
+
+  {
+    rtc::CritScope critScoped(&_critSect);
+
+    // Create thread which will drive the capturing
+    LPTHREAD_START_ROUTINE lpStartAddress = WSAPICaptureThread;
+    if (_builtInAecEnabled) {
+      // Redirect to the DMO polling method.
+      lpStartAddress = WSAPICaptureThreadPollDMO;
+
+      if (!_playing) {
+        // The DMO won't provide us captured output data unless we
+        // give it render data to process.
+        LOG(LS_ERROR) << "Playout must be started before recording when using"
+                      << " the built-in AEC";
+        return -1;
+      }
+    }
+
+    assert(_hRecThread == NULL);
+    _hRecThread = CreateThread(NULL, 0, lpStartAddress, this, 0, NULL);
+    if (_hRecThread == NULL) {
+      LOG(LS_ERROR) << "failed to create the recording thread";
+      return -1;
+    }
+
+    // Set thread priority to highest possible
+    SetThreadPriority(_hRecThread, THREAD_PRIORITY_TIME_CRITICAL);
+
+    assert(_hGetCaptureVolumeThread == NULL);
+    _hGetCaptureVolumeThread =
+        CreateThread(NULL, 0, GetCaptureVolumeThread, this, 0, NULL);
+    if (_hGetCaptureVolumeThread == NULL) {
+      LOG(LS_ERROR) << "failed to create the volume getter thread";
+      return -1;
+    }
+
+    assert(_hSetCaptureVolumeThread == NULL);
+    _hSetCaptureVolumeThread =
+        CreateThread(NULL, 0, SetCaptureVolumeThread, this, 0, NULL);
+    if (_hSetCaptureVolumeThread == NULL) {
+      LOG(LS_ERROR) << "failed to create the volume setter thread";
+      return -1;
+    }
+  }  // critScoped
+
+  DWORD ret = WaitForSingleObject(_hCaptureStartedEvent, 1000);
+  if (ret != WAIT_OBJECT_0) {
+    LOG(LS_VERBOSE) << "capturing did not start up properly";
+    return -1;
+  }
+  LOG(LS_VERBOSE) << "capture audio stream has now started...";
+
+  _recording = true;
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  StopRecording
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StopRecording()
-{
-    int32_t err = 0;
+int32_t AudioDeviceWindowsCore::StopRecording() {
+  int32_t err = 0;
 
-    if (!_recIsInitialized)
-    {
-        return 0;
-    }
+  if (!_recIsInitialized) {
+    return 0;
+  }
 
-    _Lock();
+  _Lock();
 
-    if (_hRecThread == NULL)
-    {
-        LOG(LS_VERBOSE)
-            << "no capturing stream is active => close down WASAPI only";
-        SAFE_RELEASE(_ptrClientIn);
-        SAFE_RELEASE(_ptrCaptureClient);
-        _recIsInitialized = false;
-        _recording = false;
-        _UnLock();
-        return 0;
-    }
-
-    // Stop the driving thread...
-    LOG(LS_VERBOSE) << "closing down the webrtc_core_audio_capture_thread...";
-    // Manual-reset event; it will remain signalled to stop all capture threads.
-    SetEvent(_hShutdownCaptureEvent);
-
-    _UnLock();
-    DWORD ret = WaitForSingleObject(_hRecThread, 2000);
-    if (ret != WAIT_OBJECT_0)
-    {
-        LOG(LS_ERROR)
-            << "failed to close down webrtc_core_audio_capture_thread";
-        err = -1;
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "webrtc_core_audio_capture_thread is now closed";
-    }
-
-    ret = WaitForSingleObject(_hGetCaptureVolumeThread, 2000);
-    if (ret != WAIT_OBJECT_0)
-    {
-        // the thread did not stop as it should
-        LOG(LS_ERROR) << "failed to close down volume getter thread";
-        err = -1;
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "volume getter thread is now closed";
-    }
-
-    ret = WaitForSingleObject(_hSetCaptureVolumeThread, 2000);
-    if (ret != WAIT_OBJECT_0)
-    {
-        // the thread did not stop as it should
-        LOG(LS_ERROR) << "failed to close down volume setter thread";
-        err = -1;
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "volume setter thread is now closed";
-    }
-    _Lock();
-
-    ResetEvent(_hShutdownCaptureEvent); // Must be manually reset.
-    // Ensure that the thread has released these interfaces properly.
-    assert(err == -1 || _ptrClientIn == NULL);
-    assert(err == -1 || _ptrCaptureClient == NULL);
-
+  if (_hRecThread == NULL) {
+    LOG(LS_VERBOSE)
+        << "no capturing stream is active => close down WASAPI only";
+    SAFE_RELEASE(_ptrClientIn);
+    SAFE_RELEASE(_ptrCaptureClient);
     _recIsInitialized = false;
     _recording = false;
-
-    // These will create thread leaks in the result of an error,
-    // but we can at least resume the call.
-    CloseHandle(_hRecThread);
-    _hRecThread = NULL;
-
-    CloseHandle(_hGetCaptureVolumeThread);
-    _hGetCaptureVolumeThread = NULL;
-
-    CloseHandle(_hSetCaptureVolumeThread);
-    _hSetCaptureVolumeThread = NULL;
-
-    if (_builtInAecEnabled)
-    {
-        assert(_dmo != NULL);
-        // This is necessary. Otherwise the DMO can generate garbage render
-        // audio even after rendering has stopped.
-        HRESULT hr = _dmo->FreeStreamingResources();
-        if (FAILED(hr))
-        {
-            _TraceCOMError(hr);
-            err = -1;
-        }
-    }
-
-    // Reset the recording delay value.
-    _sndCardRecDelay = 0;
-
     _UnLock();
+    return 0;
+  }
 
-    return err;
+  // Stop the driving thread...
+  LOG(LS_VERBOSE) << "closing down the webrtc_core_audio_capture_thread...";
+  // Manual-reset event; it will remain signalled to stop all capture threads.
+  SetEvent(_hShutdownCaptureEvent);
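+  // Illustrative sketch (not part of this formatting CL): the capture thread
+  // is expected to wait on this event alongside its samples-ready event and
+  // exit once it becomes signalled, roughly:
+  //
+  //   HANDLE waitArray[2] = {_hShutdownCaptureEvent,
+  //                          _hCaptureSamplesReadyEvent};
+  //   while (WaitForMultipleObjects(2, waitArray, FALSE, INFINITE) !=
+  //          WAIT_OBJECT_0) {
+  //     // Process the next batch of captured samples.
+  //   }
+  //
+  // Since the event is manual-reset, it also reaches the volume getter/setter
+  // threads and stays signalled until the ResetEvent() call below.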
+
+  _UnLock();
+  DWORD ret = WaitForSingleObject(_hRecThread, 2000);
+  if (ret != WAIT_OBJECT_0) {
+    LOG(LS_ERROR) << "failed to close down webrtc_core_audio_capture_thread";
+    err = -1;
+  } else {
+    LOG(LS_VERBOSE) << "webrtc_core_audio_capture_thread is now closed";
+  }
+
+  ret = WaitForSingleObject(_hGetCaptureVolumeThread, 2000);
+  if (ret != WAIT_OBJECT_0) {
+    // the thread did not stop as it should
+    LOG(LS_ERROR) << "failed to close down volume getter thread";
+    err = -1;
+  } else {
+    LOG(LS_VERBOSE) << "volume getter thread is now closed";
+  }
+
+  ret = WaitForSingleObject(_hSetCaptureVolumeThread, 2000);
+  if (ret != WAIT_OBJECT_0) {
+    // the thread did not stop as it should
+    LOG(LS_ERROR) << "failed to close down volume setter thread";
+    err = -1;
+  } else {
+    LOG(LS_VERBOSE) << "volume setter thread is now closed";
+  }
+  _Lock();
+
+  ResetEvent(_hShutdownCaptureEvent);  // Must be manually reset.
+  // Ensure that the thread has released these interfaces properly.
+  assert(err == -1 || _ptrClientIn == NULL);
+  assert(err == -1 || _ptrCaptureClient == NULL);
+
+  _recIsInitialized = false;
+  _recording = false;
+
+  // These will create thread leaks in the event of an error,
+  // but we can at least resume the call.
+  CloseHandle(_hRecThread);
+  _hRecThread = NULL;
+
+  CloseHandle(_hGetCaptureVolumeThread);
+  _hGetCaptureVolumeThread = NULL;
+
+  CloseHandle(_hSetCaptureVolumeThread);
+  _hSetCaptureVolumeThread = NULL;
+
+  if (_builtInAecEnabled) {
+    assert(_dmo != NULL);
+    // This is necessary. Otherwise the DMO can generate garbage render
+    // audio even after rendering has stopped.
+    HRESULT hr = _dmo->FreeStreamingResources();
+    if (FAILED(hr)) {
+      _TraceCOMError(hr);
+      err = -1;
+    }
+  }
+
+  // Reset the recording delay value.
+  _sndCardRecDelay = 0;
+
+  _UnLock();
+
+  return err;
 }
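
For reference, the capture shutdown above uses the standard Win32 handshake: signal a
manual-reset shutdown event, wait on each worker thread handle with a 2 second timeout,
then reset the event and close the handles. A minimal sketch of that handshake, assuming
a single worker thread and hypothetical names (StopWorker, hShutdownEvent, hWorkerThread),
is:

// Minimal sketch of the shutdown handshake, assuming a worker thread that
// waits on hShutdownEvent and exits once it becomes signalled.
#include <windows.h>

int StopWorker(HANDLE hShutdownEvent, HANDLE hWorkerThread) {
  int err = 0;
  SetEvent(hShutdownEvent);  // Manual-reset: stays signalled for all waiters.
  DWORD ret = WaitForSingleObject(hWorkerThread, 2000);
  if (ret != WAIT_OBJECT_0) {
    err = -1;  // The thread did not stop within the 2 second timeout.
  }
  ResetEvent(hShutdownEvent);  // Must be reset manually before it is reused.
  CloseHandle(hWorkerThread);  // Leaks the thread if it is still running.
  return err;
}

The manual-reset choice is what lets one SetEvent() stop several threads at once, which is
why the capture thread and both volume threads can all be shut down with the same event.
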
 
 // ----------------------------------------------------------------------------
 //  RecordingIsInitialized
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::RecordingIsInitialized() const
-{
-    return (_recIsInitialized);
+bool AudioDeviceWindowsCore::RecordingIsInitialized() const {
+  return (_recIsInitialized);
 }
 
 // ----------------------------------------------------------------------------
 //  Recording
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::Recording() const
-{
-    return (_recording);
+bool AudioDeviceWindowsCore::Recording() const {
+  return (_recording);
 }
 
 // ----------------------------------------------------------------------------
 //  PlayoutIsInitialized
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::PlayoutIsInitialized() const
-{
-
-    return (_playIsInitialized);
+bool AudioDeviceWindowsCore::PlayoutIsInitialized() const {
+  return (_playIsInitialized);
 }
 
 // ----------------------------------------------------------------------------
 //  StartPlayout
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StartPlayout()
-{
+int32_t AudioDeviceWindowsCore::StartPlayout() {
+  if (!_playIsInitialized) {
+    return -1;
+  }
 
-    if (!_playIsInitialized)
-    {
-        return -1;
-    }
-
-    if (_hPlayThread != NULL)
-    {
-        return 0;
-    }
-
-    if (_playing)
-    {
-        return 0;
-    }
-
-    {
-        rtc::CritScope critScoped(&_critSect);
-
-        // Create thread which will drive the rendering.
-        assert(_hPlayThread == NULL);
-        _hPlayThread = CreateThread(
-                         NULL,
-                         0,
-                         WSAPIRenderThread,
-                         this,
-                         0,
-                         NULL);
-        if (_hPlayThread == NULL)
-        {
-            LOG(LS_ERROR) << "failed to create the playout thread";
-            return -1;
-        }
-
-        // Set thread priority to highest possible.
-        SetThreadPriority(_hPlayThread, THREAD_PRIORITY_TIME_CRITICAL);
-    }  // critScoped
-
-    DWORD ret = WaitForSingleObject(_hRenderStartedEvent, 1000);
-    if (ret != WAIT_OBJECT_0)
-    {
-        LOG(LS_VERBOSE) << "rendering did not start up properly";
-        return -1;
-    }
-
-    _playing = true;
-    LOG(LS_VERBOSE) << "rendering audio stream has now started...";
-
+  if (_hPlayThread != NULL) {
     return 0;
+  }
+
+  if (_playing) {
+    return 0;
+  }
+
+  {
+    rtc::CritScope critScoped(&_critSect);
+
+    // Create thread which will drive the rendering.
+    assert(_hPlayThread == NULL);
+    _hPlayThread = CreateThread(NULL, 0, WSAPIRenderThread, this, 0, NULL);
+    if (_hPlayThread == NULL) {
+      LOG(LS_ERROR) << "failed to create the playout thread";
+      return -1;
+    }
+
+    // Set thread priority to highest possible.
+    SetThreadPriority(_hPlayThread, THREAD_PRIORITY_TIME_CRITICAL);
+  }  // critScoped
+
+  DWORD ret = WaitForSingleObject(_hRenderStartedEvent, 1000);
+  if (ret != WAIT_OBJECT_0) {
+    LOG(LS_VERBOSE) << "rendering did not start up properly";
+    return -1;
+  }
+
+  _playing = true;
+  LOG(LS_VERBOSE) << "rendering audio stream has now started...";
+
+  return 0;
 }
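
The start-up path above follows a create-then-handshake pattern: the render thread is
created under the lock with boosted priority, and the call only reports success once the
thread signals the render-started event within one second. A minimal sketch of that
pattern, with hypothetical names (StartRenderThread, thread_func, started_event), is:

// Minimal sketch of the create-then-handshake start-up pattern.
#include <windows.h>

bool StartRenderThread(LPTHREAD_START_ROUTINE thread_func, void* context,
                       HANDLE started_event, HANDLE* thread_out) {
  *thread_out = CreateThread(NULL, 0, thread_func, context, 0, NULL);
  if (*thread_out == NULL)
    return false;
  // Boost the priority, as the real code does for the playout thread.
  SetThreadPriority(*thread_out, THREAD_PRIORITY_TIME_CRITICAL);
  // Wait up to one second for the thread to signal that rendering started.
  return WaitForSingleObject(started_event, 1000) == WAIT_OBJECT_0;
}
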
 
 // ----------------------------------------------------------------------------
 //  StopPlayout
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StopPlayout()
-{
-
-    if (!_playIsInitialized)
-    {
-        return 0;
-    }
-
-    {
-        rtc::CritScope critScoped(&_critSect) ;
-
-        if (_hPlayThread == NULL)
-        {
-            LOG(LS_VERBOSE)
-                << "no rendering stream is active => close down WASAPI only";
-            SAFE_RELEASE(_ptrClientOut);
-            SAFE_RELEASE(_ptrRenderClient);
-            _playIsInitialized = false;
-            _playing = false;
-            return 0;
-        }
-
-        // stop the driving thread...
-        LOG(LS_VERBOSE)
-            << "closing down the webrtc_core_audio_render_thread...";
-        SetEvent(_hShutdownRenderEvent);
-    }  // critScoped
-
-    DWORD ret = WaitForSingleObject(_hPlayThread, 2000);
-    if (ret != WAIT_OBJECT_0)
-    {
-        // the thread did not stop as it should
-        LOG(LS_ERROR) << "failed to close down webrtc_core_audio_render_thread";
-        CloseHandle(_hPlayThread);
-        _hPlayThread = NULL;
-        _playIsInitialized = false;
-        _playing = false;
-        return -1;
-    }
-
-    {
-        rtc::CritScope critScoped(&_critSect);
-        LOG(LS_VERBOSE) << "webrtc_core_audio_render_thread is now closed";
-
-        // to reset this event manually at each time we finish with it,
-        // in case that the render thread has exited before StopPlayout(),
-        // this event might be caught by the new render thread within same VoE instance.
-        ResetEvent(_hShutdownRenderEvent);
-
-        SAFE_RELEASE(_ptrClientOut);
-        SAFE_RELEASE(_ptrRenderClient);
-
-        _playIsInitialized = false;
-        _playing = false;
-
-        CloseHandle(_hPlayThread);
-        _hPlayThread = NULL;
-
-        if (_builtInAecEnabled && _recording)
-        {
-            // The DMO won't provide us captured output data unless we
-            // give it render data to process.
-            //
-            // We still permit the playout to shutdown, and trace a warning.
-            // Otherwise, VoE can get into a state which will never permit
-            // playout to stop properly.
-            LOG(LS_WARNING)
-                << "Recording should be stopped before playout when using the"
-                << " built-in AEC";
-        }
-
-        // Reset the playout delay value.
-        _sndCardPlayDelay = 0;
-    }  // critScoped
-
+int32_t AudioDeviceWindowsCore::StopPlayout() {
+  if (!_playIsInitialized) {
     return 0;
+  }
+
+  {
+    rtc::CritScope critScoped(&_critSect);
+
+    if (_hPlayThread == NULL) {
+      LOG(LS_VERBOSE)
+          << "no rendering stream is active => close down WASAPI only";
+      SAFE_RELEASE(_ptrClientOut);
+      SAFE_RELEASE(_ptrRenderClient);
+      _playIsInitialized = false;
+      _playing = false;
+      return 0;
+    }
+
+    // stop the driving thread...
+    LOG(LS_VERBOSE) << "closing down the webrtc_core_audio_render_thread...";
+    SetEvent(_hShutdownRenderEvent);
+  }  // critScoped
+
+  DWORD ret = WaitForSingleObject(_hPlayThread, 2000);
+  if (ret != WAIT_OBJECT_0) {
+    // the thread did not stop as it should
+    LOG(LS_ERROR) << "failed to close down webrtc_core_audio_render_thread";
+    CloseHandle(_hPlayThread);
+    _hPlayThread = NULL;
+    _playIsInitialized = false;
+    _playing = false;
+    return -1;
+  }
+
+  {
+    rtc::CritScope critScoped(&_critSect);
+    LOG(LS_VERBOSE) << "webrtc_core_audio_render_thread is now closed";
+
+    // Reset this event manually each time we are done with it. If the render
+    // thread has exited before StopPlayout(), the event could otherwise be
+    // caught by a new render thread within the same VoE instance.
+    ResetEvent(_hShutdownRenderEvent);
+
+    SAFE_RELEASE(_ptrClientOut);
+    SAFE_RELEASE(_ptrRenderClient);
+
+    _playIsInitialized = false;
+    _playing = false;
+
+    CloseHandle(_hPlayThread);
+    _hPlayThread = NULL;
+
+    if (_builtInAecEnabled && _recording) {
+      // The DMO won't provide us captured output data unless we
+      // give it render data to process.
+      //
+      // We still permit the playout to shut down, and trace a warning.
+      // Otherwise, VoE can get into a state which will never permit
+      // playout to stop properly.
+      LOG(LS_WARNING)
+          << "Recording should be stopped before playout when using the"
+          << " built-in AEC";
+    }
+
+    // Reset the playout delay value.
+    _sndCardPlayDelay = 0;
+  }  // critScoped
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  PlayoutDelay
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::PlayoutDelay(uint16_t& delayMS) const
-{
-    rtc::CritScope critScoped(&_critSect);
-    delayMS = static_cast<uint16_t>(_sndCardPlayDelay);
-    return 0;
+int32_t AudioDeviceWindowsCore::PlayoutDelay(uint16_t& delayMS) const {
+  rtc::CritScope critScoped(&_critSect);
+  delayMS = static_cast<uint16_t>(_sndCardPlayDelay);
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  Playing
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::Playing() const
-{
-    return (_playing);
+bool AudioDeviceWindowsCore::Playing() const {
+  return (_playing);
 }
 
 // ============================================================================
@@ -3049,1068 +2707,953 @@
 //  [static] WSAPIRenderThread
 // ----------------------------------------------------------------------------
 
-DWORD WINAPI AudioDeviceWindowsCore::WSAPIRenderThread(LPVOID context)
-{
-    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
-        DoRenderThread();
+DWORD WINAPI AudioDeviceWindowsCore::WSAPIRenderThread(LPVOID context) {
+  return reinterpret_cast<AudioDeviceWindowsCore*>(context)->DoRenderThread();
 }
 
 // ----------------------------------------------------------------------------
 //  [static] WSAPICaptureThread
 // ----------------------------------------------------------------------------
 
-DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThread(LPVOID context)
-{
-    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
-        DoCaptureThread();
+DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThread(LPVOID context) {
+  return reinterpret_cast<AudioDeviceWindowsCore*>(context)->DoCaptureThread();
 }
 
-DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThreadPollDMO(LPVOID context)
-{
-    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
-        DoCaptureThreadPollDMO();
+DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThreadPollDMO(LPVOID context) {
+  return reinterpret_cast<AudioDeviceWindowsCore*>(context)
+      ->DoCaptureThreadPollDMO();
 }
 
-DWORD WINAPI AudioDeviceWindowsCore::GetCaptureVolumeThread(LPVOID context)
-{
-    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
-        DoGetCaptureVolumeThread();
+DWORD WINAPI AudioDeviceWindowsCore::GetCaptureVolumeThread(LPVOID context) {
+  return reinterpret_cast<AudioDeviceWindowsCore*>(context)
+      ->DoGetCaptureVolumeThread();
 }
 
-DWORD WINAPI AudioDeviceWindowsCore::SetCaptureVolumeThread(LPVOID context)
-{
-    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
-        DoSetCaptureVolumeThread();
+DWORD WINAPI AudioDeviceWindowsCore::SetCaptureVolumeThread(LPVOID context) {
+  return reinterpret_cast<AudioDeviceWindowsCore*>(context)
+      ->DoSetCaptureVolumeThread();
 }
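
These static functions exist because CreateThread() takes a plain function pointer rather
than a member function; each one just casts the LPVOID context back to the object and
forwards to the per-instance method. The general trampoline pattern, with hypothetical
names (Worker, ThreadEntry, Run), looks like this:

// Minimal sketch of the CreateThread() trampoline pattern.
#include <windows.h>

class Worker {
 public:
  // Static entry point handed to CreateThread(); |context| is the Worker*.
  static DWORD WINAPI ThreadEntry(LPVOID context) {
    return reinterpret_cast<Worker*>(context)->Run();
  }

 private:
  DWORD Run() { return 0; }  // Per-instance thread body.
};

// Usage: CreateThread(NULL, 0, &Worker::ThreadEntry, &worker, 0, NULL);
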
 
-DWORD AudioDeviceWindowsCore::DoGetCaptureVolumeThread()
-{
-    HANDLE waitObject = _hShutdownCaptureEvent;
+DWORD AudioDeviceWindowsCore::DoGetCaptureVolumeThread() {
+  HANDLE waitObject = _hShutdownCaptureEvent;
 
-    while (1)
-    {
-        if (AGC())
-        {
-            uint32_t currentMicLevel = 0;
-            if (MicrophoneVolume(currentMicLevel) == 0)
-            {
-                // This doesn't set the system volume, just stores it.
-                _Lock();
-                if (_ptrAudioBuffer)
-                {
-                    _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
-                }
-                _UnLock();
-            }
-        }
-
-        DWORD waitResult = WaitForSingleObject(waitObject,
-                                               GET_MIC_VOLUME_INTERVAL_MS);
-        switch (waitResult)
-        {
-            case WAIT_OBJECT_0: // _hShutdownCaptureEvent
-                return 0;
-            case WAIT_TIMEOUT:  // timeout notification
-                break;
-            default:            // unexpected error
-                LOG(LS_WARNING)
-                    << "unknown wait termination on get volume thread";
-                return 1;
-        }
-    }
-}
-
-DWORD AudioDeviceWindowsCore::DoSetCaptureVolumeThread()
-{
-    HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hSetCaptureVolumeEvent};
-
-    while (1)
-    {
-        DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, INFINITE);
-        switch (waitResult)
-        {
-            case WAIT_OBJECT_0:      // _hShutdownCaptureEvent
-                return 0;
-            case WAIT_OBJECT_0 + 1:  // _hSetCaptureVolumeEvent
-                break;
-            default:                 // unexpected error
-                LOG(LS_WARNING)
-                    << "unknown wait termination on set volume thread";
-                    return 1;
-        }
-
+  while (1) {
+    if (AGC()) {
+      uint32_t currentMicLevel = 0;
+      if (MicrophoneVolume(currentMicLevel) == 0) {
+        // This doesn't set the system volume, just stores it.
         _Lock();
-        uint32_t newMicLevel = _newMicLevel;
-        _UnLock();
-
-        if (SetMicrophoneVolume(newMicLevel) == -1)
-        {
-            LOG(LS_WARNING)
-                << "the required modification of the microphone volume failed";
+        if (_ptrAudioBuffer) {
+          _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
         }
+        _UnLock();
+      }
     }
+
+    DWORD waitResult =
+        WaitForSingleObject(waitObject, GET_MIC_VOLUME_INTERVAL_MS);
+    switch (waitResult) {
+      case WAIT_OBJECT_0:  // _hShutdownCaptureEvent
+        return 0;
+      case WAIT_TIMEOUT:  // timeout notification
+        break;
+      default:  // unexpected error
+        LOG(LS_WARNING) << "unknown wait termination on get volume thread";
+        return 1;
+    }
+  }
+}
+
+DWORD AudioDeviceWindowsCore::DoSetCaptureVolumeThread() {
+  HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hSetCaptureVolumeEvent};
+
+  while (1) {
+    DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, INFINITE);
+    switch (waitResult) {
+      case WAIT_OBJECT_0:  // _hShutdownCaptureEvent
+        return 0;
+      case WAIT_OBJECT_0 + 1:  // _hSetCaptureVolumeEvent
+        break;
+      default:  // unexpected error
+        LOG(LS_WARNING) << "unknown wait termination on set volume thread";
+        return 1;
+    }
+
+    _Lock();
+    uint32_t newMicLevel = _newMicLevel;
+    _UnLock();
+
+    if (SetMicrophoneVolume(newMicLevel) == -1) {
+      LOG(LS_WARNING)
+          << "the required modification of the microphone volume failed";
+    }
+  }
 }
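
Both volume threads share the same loop shape: wait on the shutdown event (alone, or
together with a work event), return on WAIT_OBJECT_0, and otherwise perform one unit of
work. A stripped-down sketch of the two-event variant, with hypothetical names
(RunWorkerLoop, hShutdown, hWork), is:

// Minimal sketch of the two-event worker loop used by the volume threads.
#include <windows.h>

DWORD RunWorkerLoop(HANDLE hShutdown, HANDLE hWork) {
  HANDLE waitArray[2] = {hShutdown, hWork};
  while (true) {
    DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, INFINITE);
    switch (waitResult) {
      case WAIT_OBJECT_0:      // hShutdown: time to exit.
        return 0;
      case WAIT_OBJECT_0 + 1:  // hWork: fall through and handle the request.
        break;
      default:                 // Unexpected wait result.
        return 1;
    }
    // DoWork();  // Placeholder for the actual work item.
  }
}
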
 
 // ----------------------------------------------------------------------------
 //  DoRenderThread
 // ----------------------------------------------------------------------------
 
-DWORD AudioDeviceWindowsCore::DoRenderThread()
-{
+DWORD AudioDeviceWindowsCore::DoRenderThread() {
+  bool keepPlaying = true;
+  HANDLE waitArray[2] = {_hShutdownRenderEvent, _hRenderSamplesReadyEvent};
+  HRESULT hr = S_OK;
+  HANDLE hMmTask = NULL;
 
-    bool keepPlaying = true;
-    HANDLE waitArray[2] = {_hShutdownRenderEvent, _hRenderSamplesReadyEvent};
-    HRESULT hr = S_OK;
-    HANDLE hMmTask = NULL;
+  // Initialize COM as MTA in this thread.
+  ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+  if (!comInit.succeeded()) {
+    LOG(LS_ERROR) << "failed to initialize COM in render thread";
+    return 1;
+  }
 
-    // Initialize COM as MTA in this thread.
-    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
-    if (!comInit.succeeded()) {
-      LOG(LS_ERROR) << "failed to initialize COM in render thread";
-      return 1;
+  rtc::SetCurrentThreadName("webrtc_core_audio_render_thread");
+
+  // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
+  // priority.
+  //
+  if (_winSupportAvrt) {
+    DWORD taskIndex(0);
+    hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
+    if (hMmTask) {
+      if (FALSE == _PAvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL)) {
+        LOG(LS_WARNING) << "failed to boost play-thread using MMCSS";
+      }
+      LOG(LS_VERBOSE)
+          << "render thread is now registered with MMCSS (taskIndex="
+          << taskIndex << ")";
+    } else {
+      LOG(LS_WARNING) << "failed to enable MMCSS on render thread (err="
+                      << GetLastError() << ")";
+      _TraceCOMError(GetLastError());
+    }
+  }
+
+  _Lock();
+
+  IAudioClock* clock = NULL;
+
+  // Get size of rendering buffer (length is expressed as the number of audio
+  // frames the buffer can hold). This value is fixed during the rendering
+  // session.
+  //
+  UINT32 bufferLength = 0;
+  hr = _ptrClientOut->GetBufferSize(&bufferLength);
+  EXIT_ON_ERROR(hr);
+  LOG(LS_VERBOSE) << "[REND] size of buffer       : " << bufferLength;
+
+  // Get maximum latency for the current stream (will not change for the
+  // lifetime of the IAudioClient object).
+  //
+  REFERENCE_TIME latency;
+  _ptrClientOut->GetStreamLatency(&latency);
+  LOG(LS_VERBOSE) << "[REND] max stream latency   : " << (DWORD)latency << " ("
+                  << (double)(latency / 10000.0) << " ms)";
+
+  // Get the length of the periodic interval separating successive processing
+  // passes by the audio engine on the data in the endpoint buffer.
+  //
+  // The period between processing passes by the audio engine is fixed for a
+  // particular audio endpoint device and represents the smallest processing
+  // quantum for the audio engine. This period plus the stream latency between
+  // the buffer and endpoint device represents the minimum possible latency that
+  // an audio application can achieve. Typical value: 100000 <=> 0.01 sec =
+  // 10ms.
+  //
+  REFERENCE_TIME devPeriod = 0;
+  REFERENCE_TIME devPeriodMin = 0;
+  _ptrClientOut->GetDevicePeriod(&devPeriod, &devPeriodMin);
+  LOG(LS_VERBOSE) << "[REND] device period        : " << (DWORD)devPeriod
+                  << " (" << (double)(devPeriod / 10000.0) << " ms)";
+
+  // Derive initial rendering delay.
+  // Example: 10*(960/480) + 15 = 20 + 15 = 35ms
+  //
+  int playout_delay = 10 * (bufferLength / _playBlockSizeInFrames) +
+                      (int)((latency + devPeriod) / 10000);
+  _sndCardPlayDelay = playout_delay;
+  _writtenSamples = 0;
+  LOG(LS_VERBOSE) << "[REND] initial delay        : " << playout_delay;
+
+  double endpointBufferSizeMS =
+      10.0 * ((double)bufferLength / (double)_devicePlayBlockSize);
+  LOG(LS_VERBOSE) << "[REND] endpointBufferSizeMS : " << endpointBufferSizeMS;
+
+  // Before starting the stream, fill the rendering buffer with silence.
+  //
+  BYTE* pData = NULL;
+  hr = _ptrRenderClient->GetBuffer(bufferLength, &pData);
+  EXIT_ON_ERROR(hr);
+
+  hr =
+      _ptrRenderClient->ReleaseBuffer(bufferLength, AUDCLNT_BUFFERFLAGS_SILENT);
+  EXIT_ON_ERROR(hr);
+
+  _writtenSamples += bufferLength;
+
+  hr = _ptrClientOut->GetService(__uuidof(IAudioClock), (void**)&clock);
+  if (FAILED(hr)) {
+    LOG(LS_WARNING)
+        << "failed to get IAudioClock interface from the IAudioClient";
+  }
+
+  // Start up the rendering audio stream.
+  hr = _ptrClientOut->Start();
+  EXIT_ON_ERROR(hr);
+
+  _UnLock();
+
+  // Set event which will ensure that the calling thread modifies the playing
+  // state to true.
+  //
+  SetEvent(_hRenderStartedEvent);
+
+  // >> ------------------ THREAD LOOP ------------------
+
+  while (keepPlaying) {
+    // Wait for a render notification event or a shutdown event
+    DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
+    switch (waitResult) {
+      case WAIT_OBJECT_0 + 0:  // _hShutdownRenderEvent
+        keepPlaying = false;
+        break;
+      case WAIT_OBJECT_0 + 1:  // _hRenderSamplesReadyEvent
+        break;
+      case WAIT_TIMEOUT:  // timeout notification
+        LOG(LS_WARNING) << "render event timed out after 0.5 seconds";
+        goto Exit;
+      default:  // unexpected error
+        LOG(LS_WARNING) << "unknown wait termination on render side";
+        goto Exit;
     }
 
-    rtc::SetCurrentThreadName("webrtc_core_audio_render_thread");
+    while (keepPlaying) {
+      _Lock();
 
-    // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread priority.
-    //
-    if (_winSupportAvrt)
-    {
-        DWORD taskIndex(0);
-        hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
-        if (hMmTask)
-        {
-            if (FALSE == _PAvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL))
-            {
-                LOG(LS_WARNING) << "failed to boost play-thread using MMCSS";
-            }
-            LOG(LS_VERBOSE)
-                << "render thread is now registered with MMCSS (taskIndex="
-                << taskIndex << ")";
-        }
-        else
-        {
-            LOG(LS_WARNING) << "failed to enable MMCSS on render thread (err="
-                            << GetLastError() << ")";
-            _TraceCOMError(GetLastError());
-        }
-    }
+      // Sanity check to ensure that essential states are not modified
+      // during the unlocked period.
+      if (_ptrRenderClient == NULL || _ptrClientOut == NULL) {
+        _UnLock();
+        LOG(LS_ERROR)
+            << "output state has been modified during unlocked period";
+        goto Exit;
+      }
 
-    _Lock();
+      // Get the number of frames of padding (queued up to play) in the endpoint
+      // buffer.
+      UINT32 padding = 0;
+      hr = _ptrClientOut->GetCurrentPadding(&padding);
+      EXIT_ON_ERROR(hr);
 
-    IAudioClock* clock = NULL;
+      // Derive the amount of available space in the output buffer
+      uint32_t framesAvailable = bufferLength - padding;
 
-    // Get size of rendering buffer (length is expressed as the number of audio frames the buffer can hold).
-    // This value is fixed during the rendering session.
-    //
-    UINT32 bufferLength = 0;
-    hr = _ptrClientOut->GetBufferSize(&bufferLength);
-    EXIT_ON_ERROR(hr);
-    LOG(LS_VERBOSE) << "[REND] size of buffer       : " << bufferLength;
+      // Do we have 10 ms available in the render buffer?
+      if (framesAvailable < _playBlockSizeInFrames) {
+        // Not enough space in render buffer to store next render packet.
+        _UnLock();
+        break;
+      }
 
-    // Get maximum latency for the current stream (will not change for the lifetime  of the IAudioClient object).
-    //
-    REFERENCE_TIME latency;
-    _ptrClientOut->GetStreamLatency(&latency);
-    LOG(LS_VERBOSE) << "[REND] max stream latency   : " << (DWORD)latency
-                    << " (" << (double)(latency/10000.0) << " ms)";
+      // Write n*10ms buffers to the render buffer
+      const uint32_t n10msBuffers = (framesAvailable / _playBlockSizeInFrames);
+      for (uint32_t n = 0; n < n10msBuffers; n++) {
+        // Get pointer (i.e., grab the buffer) to next space in the shared
+        // render buffer.
+        hr = _ptrRenderClient->GetBuffer(_playBlockSizeInFrames, &pData);
+        EXIT_ON_ERROR(hr);
 
-    // Get the length of the periodic interval separating successive processing passes by
-    // the audio engine on the data in the endpoint buffer.
-    //
-    // The period between processing passes by the audio engine is fixed for a particular
-    // audio endpoint device and represents the smallest processing quantum for the audio engine.
-    // This period plus the stream latency between the buffer and endpoint device represents
-    // the minimum possible latency that an audio application can achieve.
-    // Typical value: 100000 <=> 0.01 sec = 10ms.
-    //
-    REFERENCE_TIME devPeriod = 0;
-    REFERENCE_TIME devPeriodMin = 0;
-    _ptrClientOut->GetDevicePeriod(&devPeriod, &devPeriodMin);
-    LOG(LS_VERBOSE) << "[REND] device period        : " << (DWORD)devPeriod
-                    << " (" << (double)(devPeriod/10000.0) << " ms)";
+        if (_ptrAudioBuffer) {
+          // Request data to be played out (#bytes =
+          // _playBlockSizeInFrames*_audioFrameSize)
+          _UnLock();
+          int32_t nSamples =
+              _ptrAudioBuffer->RequestPlayoutData(_playBlockSizeInFrames);
+          _Lock();
 
-    // Derive initial rendering delay.
-    // Example: 10*(960/480) + 15 = 20 + 15 = 35ms
-    //
-    int playout_delay = 10 * (bufferLength / _playBlockSizeInFrames) +
-                        (int)((latency + devPeriod) / 10000);
-    _sndCardPlayDelay = playout_delay;
-    _writtenSamples = 0;
-    LOG(LS_VERBOSE) << "[REND] initial delay        : " << playout_delay;
-
-    double endpointBufferSizeMS = 10.0 * ((double)bufferLength / (double)_devicePlayBlockSize);
-    LOG(LS_VERBOSE) << "[REND] endpointBufferSizeMS : " << endpointBufferSizeMS;
-
-    // Before starting the stream, fill the rendering buffer with silence.
-    //
-    BYTE *pData = NULL;
-    hr = _ptrRenderClient->GetBuffer(bufferLength, &pData);
-    EXIT_ON_ERROR(hr);
-
-    hr = _ptrRenderClient->ReleaseBuffer(bufferLength, AUDCLNT_BUFFERFLAGS_SILENT);
-    EXIT_ON_ERROR(hr);
-
-    _writtenSamples += bufferLength;
-
-    hr = _ptrClientOut->GetService(__uuidof(IAudioClock), (void**)&clock);
-    if (FAILED(hr)) {
-      LOG(LS_WARNING)
-          << "failed to get IAudioClock interface from the IAudioClient";
-    }
-
-    // Start up the rendering audio stream.
-    hr = _ptrClientOut->Start();
-    EXIT_ON_ERROR(hr);
-
-    _UnLock();
-
-    // Set event which will ensure that the calling thread modifies the playing state to true.
-    //
-    SetEvent(_hRenderStartedEvent);
-
-    // >> ------------------ THREAD LOOP ------------------
-
-    while (keepPlaying)
-    {
-        // Wait for a render notification event or a shutdown event
-        DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
-        switch (waitResult)
-        {
-        case WAIT_OBJECT_0 + 0:     // _hShutdownRenderEvent
-            keepPlaying = false;
-            break;
-        case WAIT_OBJECT_0 + 1:     // _hRenderSamplesReadyEvent
-            break;
-        case WAIT_TIMEOUT:          // timeout notification
-            LOG(LS_WARNING) << "render event timed out after 0.5 seconds";
-            goto Exit;
-        default:                    // unexpected error
-            LOG(LS_WARNING) << "unknown wait termination on render side";
-            goto Exit;
-        }
-
-        while (keepPlaying)
-        {
-            _Lock();
-
-            // Sanity check to ensure that essential states are not modified
-            // during the unlocked period.
-            if (_ptrRenderClient == NULL || _ptrClientOut == NULL)
-            {
-                _UnLock();
-                LOG(LS_ERROR)
-                    << "output state has been modified during unlocked period";
-                goto Exit;
-            }
-
-            // Get the number of frames of padding (queued up to play) in the endpoint buffer.
-            UINT32 padding = 0;
-            hr = _ptrClientOut->GetCurrentPadding(&padding);
-            EXIT_ON_ERROR(hr);
-
-            // Derive the amount of available space in the output buffer
-            uint32_t framesAvailable = bufferLength - padding;
-
-            // Do we have 10 ms available in the render buffer?
-            if (framesAvailable < _playBlockSizeInFrames) {
-              // Not enough space in render buffer to store next render packet.
-              _UnLock();
-              break;
-            }
-
-            // Write n*10ms buffers to the render buffer
-            const uint32_t n10msBuffers =
-                (framesAvailable / _playBlockSizeInFrames);
-            for (uint32_t n = 0; n < n10msBuffers; n++)
-            {
-                // Get pointer (i.e., grab the buffer) to next space in the shared render buffer.
-                hr =
-                    _ptrRenderClient->GetBuffer(_playBlockSizeInFrames, &pData);
-                EXIT_ON_ERROR(hr);
-
-                if (_ptrAudioBuffer)
-                {
-                  // Request data to be played out (#bytes =
-                  // _playBlockSizeInFrames*_audioFrameSize)
-                  _UnLock();
-                  int32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(
-                      _playBlockSizeInFrames);
-                  _Lock();
-
-                  if (nSamples == -1) {
-                    _UnLock();
-                    LOG(LS_ERROR) << "failed to read data from render client";
-                    goto Exit;
-                    }
-
-                    // Sanity check to ensure that essential states are not modified during the unlocked period
-                    if (_ptrRenderClient == NULL || _ptrClientOut == NULL)
-                    {
-                        _UnLock();
-                        LOG(LS_ERROR)
-                            << "output state has been modified during unlocked"
-                            << " period";
-                        goto Exit;
-                    }
-                    if (nSamples !=
-                        static_cast<int32_t>(_playBlockSizeInSamples)) {
-                      LOG(LS_WARNING)
-                          << "nSamples(" << nSamples
-                          << ") != _playBlockSizeInSamples("
-                          << _playBlockSizeInSamples << ")";
-                    }
-
-                    // Get the actual (stored) data
-                    nSamples = _ptrAudioBuffer->GetPlayoutData((int8_t*)pData);
-                }
-
-                DWORD dwFlags(0);
-                hr = _ptrRenderClient->ReleaseBuffer(_playBlockSizeInFrames,
-                                                     dwFlags);
-                // See http://msdn.microsoft.com/en-us/library/dd316605(VS.85).aspx
-                // for more details regarding AUDCLNT_E_DEVICE_INVALIDATED.
-                EXIT_ON_ERROR(hr);
-
-                _writtenSamples += _playBlockSizeInFrames;
-            }
-
-            // Check the current delay on the playout side.
-            if (clock) {
-              UINT64 pos = 0;
-              UINT64 freq = 1;
-              clock->GetPosition(&pos, NULL);
-              clock->GetFrequency(&freq);
-              playout_delay = ROUND((double(_writtenSamples) /
-                  _devicePlaySampleRate - double(pos) / freq) * 1000.0);
-              _sndCardPlayDelay = playout_delay;
-            }
-
+          if (nSamples == -1) {
             _UnLock();
+            LOG(LS_ERROR) << "failed to read data from render client";
+            goto Exit;
+          }
+
+          // Sanity check to ensure that essential states are not modified
+          // during the unlocked period
+          if (_ptrRenderClient == NULL || _ptrClientOut == NULL) {
+            _UnLock();
+            LOG(LS_ERROR) << "output state has been modified during unlocked"
+                          << " period";
+            goto Exit;
+          }
+          if (nSamples != static_cast<int32_t>(_playBlockSizeInSamples)) {
+            LOG(LS_WARNING)
+                << "nSamples(" << nSamples << ") != _playBlockSizeInSamples("
+                << _playBlockSizeInSamples << ")";
+          }
+
+          // Get the actual (stored) data
+          nSamples = _ptrAudioBuffer->GetPlayoutData((int8_t*)pData);
         }
+
+        DWORD dwFlags(0);
+        hr = _ptrRenderClient->ReleaseBuffer(_playBlockSizeInFrames, dwFlags);
+        // See http://msdn.microsoft.com/en-us/library/dd316605(VS.85).aspx
+        // for more details regarding AUDCLNT_E_DEVICE_INVALIDATED.
+        EXIT_ON_ERROR(hr);
+
+        _writtenSamples += _playBlockSizeInFrames;
+      }
+
+      // Check the current delay on the playout side.
+      if (clock) {
+        UINT64 pos = 0;
+        UINT64 freq = 1;
+        clock->GetPosition(&pos, NULL);
+        clock->GetFrequency(&freq);
+        playout_delay = ROUND((double(_writtenSamples) / _devicePlaySampleRate -
+                               double(pos) / freq) *
+                              1000.0);
+        _sndCardPlayDelay = playout_delay;
+      }
+
+      _UnLock();
     }
+  }
 
-    // ------------------ THREAD LOOP ------------------ <<
+  // ------------------ THREAD LOOP ------------------ <<
 
-    SleepMs(static_cast<DWORD>(endpointBufferSizeMS+0.5));
-    hr = _ptrClientOut->Stop();
+  SleepMs(static_cast<DWORD>(endpointBufferSizeMS + 0.5));
+  hr = _ptrClientOut->Stop();
 
 Exit:
-    SAFE_RELEASE(clock);
+  SAFE_RELEASE(clock);
 
-    if (FAILED(hr))
-    {
-        _ptrClientOut->Stop();
-        _UnLock();
-        _TraceCOMError(hr);
-    }
-
-    if (_winSupportAvrt)
-    {
-        if (NULL != hMmTask)
-        {
-            _PAvRevertMmThreadCharacteristics(hMmTask);
-        }
-    }
-
-    _Lock();
-
-    if (keepPlaying)
-    {
-        if (_ptrClientOut != NULL)
-        {
-            hr = _ptrClientOut->Stop();
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-            }
-            hr = _ptrClientOut->Reset();
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-            }
-        }
-        LOG(LS_ERROR)
-            << "Playout error: rendering thread has ended pre-maturely";
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "_Rendering thread is now terminated properly";
-    }
-
+  if (FAILED(hr)) {
+    _ptrClientOut->Stop();
     _UnLock();
+    _TraceCOMError(hr);
+  }
 
-    return (DWORD)hr;
+  if (_winSupportAvrt) {
+    if (NULL != hMmTask) {
+      _PAvRevertMmThreadCharacteristics(hMmTask);
+    }
+  }
+
+  _Lock();
+
+  if (keepPlaying) {
+    if (_ptrClientOut != NULL) {
+      hr = _ptrClientOut->Stop();
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+      }
+      hr = _ptrClientOut->Reset();
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+      }
+    }
+    LOG(LS_ERROR) << "Playout error: rendering thread has ended prematurely";
+  } else {
+    LOG(LS_VERBOSE) << "Rendering thread is now terminated properly";
+  }
+
+  _UnLock();
+
+  return (DWORD)hr;
 }
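
The initial delay derived above is one 10 ms tick per 10-ms block the endpoint buffer can
hold, plus the stream latency and device period converted from 100-ns units to
milliseconds; with bufferLength = 960, _playBlockSizeInFrames = 480 and
latency + devPeriod = 150000 (15 ms), this gives 10 * 2 + 15 = 35 ms, matching the example
in the code comment. Once the stream is running, the delay is refreshed from IAudioClock
instead; a minimal sketch of that computation, with hypothetical names (PlayoutDelayMs,
written_samples, sample_rate_hz), is:

// Hypothetical helper (not part of this CL): playout delay in milliseconds,
// given the samples written to WASAPI so far and the device sample rate.
#include <windows.h>
#include <audioclient.h>

static int PlayoutDelayMs(IAudioClock* clock, UINT64 written_samples,
                          double sample_rate_hz) {
  UINT64 pos = 0;
  UINT64 freq = 1;
  clock->GetPosition(&pos, NULL);
  clock->GetFrequency(&freq);
  // Samples handed to the endpoint buffer minus samples actually played,
  // converted to milliseconds and rounded to the nearest integer.
  double delay = (static_cast<double>(written_samples) / sample_rate_hz -
                  static_cast<double>(pos) / freq) * 1000.0;
  return static_cast<int>(delay + 0.5);
}
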
 
-DWORD AudioDeviceWindowsCore::InitCaptureThreadPriority()
-{
-    _hMmTask = NULL;
+DWORD AudioDeviceWindowsCore::InitCaptureThreadPriority() {
+  _hMmTask = NULL;
 
-    rtc::SetCurrentThreadName("webrtc_core_audio_capture_thread");
+  rtc::SetCurrentThreadName("webrtc_core_audio_capture_thread");
 
-    // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
-    // priority.
-    if (_winSupportAvrt)
-    {
-        DWORD taskIndex(0);
-        _hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
-        if (_hMmTask)
-        {
-            if (!_PAvSetMmThreadPriority(_hMmTask, AVRT_PRIORITY_CRITICAL))
-            {
-                LOG(LS_WARNING) << "failed to boost rec-thread using MMCSS";
-            }
-            LOG(LS_VERBOSE)
-                << "capture thread is now registered with MMCSS (taskIndex="
-                << taskIndex << ")";
-        }
-        else
-        {
-            LOG(LS_WARNING) << "failed to enable MMCSS on capture thread (err="
-                            << GetLastError() << ")";
-            _TraceCOMError(GetLastError());
-        }
+  // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
+  // priority.
+  if (_winSupportAvrt) {
+    DWORD taskIndex(0);
+    _hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
+    if (_hMmTask) {
+      if (!_PAvSetMmThreadPriority(_hMmTask, AVRT_PRIORITY_CRITICAL)) {
+        LOG(LS_WARNING) << "failed to boost rec-thread using MMCSS";
+      }
+      LOG(LS_VERBOSE)
+          << "capture thread is now registered with MMCSS (taskIndex="
+          << taskIndex << ")";
+    } else {
+      LOG(LS_WARNING) << "failed to enable MMCSS on capture thread (err="
+                      << GetLastError() << ")";
+      _TraceCOMError(GetLastError());
     }
+  }
 
-    return S_OK;
+  return S_OK;
 }
 
-void AudioDeviceWindowsCore::RevertCaptureThreadPriority()
-{
-    if (_winSupportAvrt)
-    {
-        if (NULL != _hMmTask)
-        {
-            _PAvRevertMmThreadCharacteristics(_hMmTask);
-        }
+void AudioDeviceWindowsCore::RevertCaptureThreadPriority() {
+  if (_winSupportAvrt) {
+    if (NULL != _hMmTask) {
+      _PAvRevertMmThreadCharacteristics(_hMmTask);
     }
+  }
 
-    _hMmTask = NULL;
+  _hMmTask = NULL;
 }
 
-DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO()
-{
-    assert(_mediaBuffer != NULL);
-    bool keepRecording = true;
+DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() {
+  assert(_mediaBuffer != NULL);
+  bool keepRecording = true;
 
-    // Initialize COM as MTA in this thread.
-    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
-    if (!comInit.succeeded()) {
-      LOG(LS_ERROR) << "failed to initialize COM in polling DMO thread";
-      return 1;
-    }
+  // Initialize COM as MTA in this thread.
+  ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+  if (!comInit.succeeded()) {
+    LOG(LS_ERROR) << "failed to initialize COM in polling DMO thread";
+    return 1;
+  }
 
-    HRESULT hr = InitCaptureThreadPriority();
-    if (FAILED(hr))
-    {
-        return hr;
-    }
-
-    // Set event which will ensure that the calling thread modifies the
-    // recording state to true.
-    SetEvent(_hCaptureStartedEvent);
-
-    // >> ---------------------------- THREAD LOOP ----------------------------
-    while (keepRecording)
-    {
-        // Poll the DMO every 5 ms.
-        // (The same interval used in the Wave implementation.)
-        DWORD waitResult = WaitForSingleObject(_hShutdownCaptureEvent, 5);
-        switch (waitResult)
-        {
-        case WAIT_OBJECT_0:         // _hShutdownCaptureEvent
-            keepRecording = false;
-            break;
-        case WAIT_TIMEOUT:          // timeout notification
-            break;
-        default:                    // unexpected error
-            LOG(LS_WARNING) << "Unknown wait termination on capture side";
-            hr = -1; // To signal an error callback.
-            keepRecording = false;
-            break;
-        }
-
-        while (keepRecording)
-        {
-            rtc::CritScope critScoped(&_critSect);
-
-            DWORD dwStatus = 0;
-            {
-                DMO_OUTPUT_DATA_BUFFER dmoBuffer = {0};
-                dmoBuffer.pBuffer = _mediaBuffer;
-                dmoBuffer.pBuffer->AddRef();
-
-                // Poll the DMO for AEC processed capture data. The DMO will
-                // copy available data to |dmoBuffer|, and should only return
-                // 10 ms frames. The value of |dwStatus| should be ignored.
-                hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus);
-                SAFE_RELEASE(dmoBuffer.pBuffer);
-                dwStatus = dmoBuffer.dwStatus;
-            }
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-                keepRecording = false;
-                assert(false);
-                break;
-            }
-
-            ULONG bytesProduced = 0;
-            BYTE* data;
-            // Get a pointer to the data buffer. This should be valid until
-            // the next call to ProcessOutput.
-            hr = _mediaBuffer->GetBufferAndLength(&data, &bytesProduced);
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-                keepRecording = false;
-                assert(false);
-                break;
-            }
-
-            // TODO(andrew): handle AGC.
-
-            if (bytesProduced > 0)
-            {
-                const int kSamplesProduced = bytesProduced / _recAudioFrameSize;
-                // TODO(andrew): verify that this is always satisfied. It might
-                // be that ProcessOutput will try to return more than 10 ms if
-                // we fail to call it frequently enough.
-                assert(kSamplesProduced == static_cast<int>(_recBlockSize));
-                assert(sizeof(BYTE) == sizeof(int8_t));
-                _ptrAudioBuffer->SetRecordedBuffer(
-                    reinterpret_cast<int8_t*>(data),
-                    kSamplesProduced);
-                _ptrAudioBuffer->SetVQEData(0, 0, 0);
-
-                _UnLock();  // Release lock while making the callback.
-                _ptrAudioBuffer->DeliverRecordedData();
-                _Lock();
-            }
-
-            // Reset length to indicate buffer availability.
-            hr = _mediaBuffer->SetLength(0);
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-                keepRecording = false;
-                assert(false);
-                break;
-            }
-
-            if (!(dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE))
-            {
-                // The DMO cannot currently produce more data. This is the
-                // normal case; otherwise it means the DMO had more than 10 ms
-                // of data available and ProcessOutput should be called again.
-                break;
-            }
-        }
-    }
-    // ---------------------------- THREAD LOOP ---------------------------- <<
-
-    RevertCaptureThreadPriority();
-
-    if (FAILED(hr))
-    {
-        LOG(LS_ERROR)
-            << "Recording error: capturing thread has ended prematurely";
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "Capturing thread is now terminated properly";
-    }
-
+  HRESULT hr = InitCaptureThreadPriority();
+  if (FAILED(hr)) {
     return hr;
-}
+  }
 
+  // Set event which will ensure that the calling thread modifies the
+  // recording state to true.
+  SetEvent(_hCaptureStartedEvent);
+
+  // >> ---------------------------- THREAD LOOP ----------------------------
+  while (keepRecording) {
+    // Poll the DMO every 5 ms.
+    // (The same interval used in the Wave implementation.)
+    DWORD waitResult = WaitForSingleObject(_hShutdownCaptureEvent, 5);
+    switch (waitResult) {
+      case WAIT_OBJECT_0:  // _hShutdownCaptureEvent
+        keepRecording = false;
+        break;
+      case WAIT_TIMEOUT:  // timeout notification
+        break;
+      default:  // unexpected error
+        LOG(LS_WARNING) << "Unknown wait termination on capture side";
+        hr = -1;  // To signal an error callback.
+        keepRecording = false;
+        break;
+    }
+
+    while (keepRecording) {
+      rtc::CritScope critScoped(&_critSect);
+
+      DWORD dwStatus = 0;
+      {
+        DMO_OUTPUT_DATA_BUFFER dmoBuffer = {0};
+        dmoBuffer.pBuffer = _mediaBuffer;
+        dmoBuffer.pBuffer->AddRef();
+
+        // Poll the DMO for AEC processed capture data. The DMO will
+        // copy available data to |dmoBuffer|, and should only return
+        // 10 ms frames. The value of |dwStatus| should be ignored.
+        hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus);
+        SAFE_RELEASE(dmoBuffer.pBuffer);
+        dwStatus = dmoBuffer.dwStatus;
+      }
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+        keepRecording = false;
+        assert(false);
+        break;
+      }
+
+      ULONG bytesProduced = 0;
+      BYTE* data;
+      // Get a pointer to the data buffer. This should be valid until
+      // the next call to ProcessOutput.
+      hr = _mediaBuffer->GetBufferAndLength(&data, &bytesProduced);
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+        keepRecording = false;
+        assert(false);
+        break;
+      }
+
+      // TODO(andrew): handle AGC.
+
+      if (bytesProduced > 0) {
+        const int kSamplesProduced = bytesProduced / _recAudioFrameSize;
+        // TODO(andrew): verify that this is always satisfied. It might
+        // be that ProcessOutput will try to return more than 10 ms if
+        // we fail to call it frequently enough.
+        assert(kSamplesProduced == static_cast<int>(_recBlockSize));
+        assert(sizeof(BYTE) == sizeof(int8_t));
+        _ptrAudioBuffer->SetRecordedBuffer(reinterpret_cast<int8_t*>(data),
+                                           kSamplesProduced);
+        _ptrAudioBuffer->SetVQEData(0, 0, 0);
+
+        _UnLock();  // Release lock while making the callback.
+        _ptrAudioBuffer->DeliverRecordedData();
+        _Lock();
+      }
+
+      // Reset length to indicate buffer availability.
+      hr = _mediaBuffer->SetLength(0);
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+        keepRecording = false;
+        assert(false);
+        break;
+      }
+
+      if (!(dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE)) {
+        // The DMO cannot currently produce more data. This is the
+        // normal case; otherwise it means the DMO had more than 10 ms
+        // of data available and ProcessOutput should be called again.
+        break;
+      }
+    }
+  }
+  // ---------------------------- THREAD LOOP ---------------------------- <<
+
+  RevertCaptureThreadPriority();
+
+  if (FAILED(hr)) {
+    LOG(LS_ERROR) << "Recording error: capturing thread has ended prematurely";
+  } else {
+    LOG(LS_VERBOSE) << "Capturing thread is now terminated properly";
+  }
+
+  return hr;
+}
 
 // ----------------------------------------------------------------------------
 //  DoCaptureThread
 // ----------------------------------------------------------------------------
 
-DWORD AudioDeviceWindowsCore::DoCaptureThread()
-{
+DWORD AudioDeviceWindowsCore::DoCaptureThread() {
+  bool keepRecording = true;
+  HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hCaptureSamplesReadyEvent};
+  HRESULT hr = S_OK;
 
-    bool keepRecording = true;
-    HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hCaptureSamplesReadyEvent};
-    HRESULT hr = S_OK;
+  LARGE_INTEGER t1;
 
-    LARGE_INTEGER t1;
+  BYTE* syncBuffer = NULL;
+  UINT32 syncBufIndex = 0;
 
-    BYTE* syncBuffer = NULL;
-    UINT32 syncBufIndex = 0;
+  _readSamples = 0;
 
-    _readSamples = 0;
+  // Initialize COM as MTA in this thread.
+  ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+  if (!comInit.succeeded()) {
+    LOG(LS_ERROR) << "failed to initialize COM in capture thread";
+    return 1;
+  }
 
-    // Initialize COM as MTA in this thread.
-    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
-    if (!comInit.succeeded()) {
-      LOG(LS_ERROR) << "failed to initialize COM in capture thread";
-      return 1;
+  hr = InitCaptureThreadPriority();
+  if (FAILED(hr)) {
+    return hr;
+  }
+
+  _Lock();
+
+  // Get size of capturing buffer (length is expressed as the number of audio
+  // frames the buffer can hold). This value is fixed during the capturing
+  // session.
+  //
+  UINT32 bufferLength = 0;
+  if (_ptrClientIn == NULL) {
+    LOG(LS_ERROR)
+        << "input state has been modified before capture loop starts.";
+    return 1;
+  }
+  hr = _ptrClientIn->GetBufferSize(&bufferLength);
+  EXIT_ON_ERROR(hr);
+  LOG(LS_VERBOSE) << "[CAPT] size of buffer       : " << bufferLength;
+
+  // Allocate memory for sync buffer.
+  // It is used for compensation between native 44.1 and internal 44.0 and
+  // for cases when the capture buffer is larger than 10ms.
+  //
+  const UINT32 syncBufferSize = 2 * (bufferLength * _recAudioFrameSize);
+  syncBuffer = new BYTE[syncBufferSize];
+  if (syncBuffer == NULL) {
+    return (DWORD)E_POINTER;
+  }
+  LOG(LS_VERBOSE) << "[CAPT] size of sync buffer  : " << syncBufferSize
+                  << " [bytes]";
+
+  // Get maximum latency for the current stream (will not change for the
+  // lifetime of the IAudioClient object).
+  //
+  REFERENCE_TIME latency;
+  _ptrClientIn->GetStreamLatency(&latency);
+  LOG(LS_VERBOSE) << "[CAPT] max stream latency   : " << (DWORD)latency << " ("
+                  << (double)(latency / 10000.0) << " ms)";
+
+  // Get the length of the periodic interval separating successive processing
+  // passes by the audio engine on the data in the endpoint buffer.
+  //
+  REFERENCE_TIME devPeriod = 0;
+  REFERENCE_TIME devPeriodMin = 0;
+  _ptrClientIn->GetDevicePeriod(&devPeriod, &devPeriodMin);
+  LOG(LS_VERBOSE) << "[CAPT] device period        : " << (DWORD)devPeriod
+                  << " (" << (double)(devPeriod / 10000.0) << " ms)";
+
+  double extraDelayMS = (double)((latency + devPeriod) / 10000.0);
+  LOG(LS_VERBOSE) << "[CAPT] extraDelayMS         : " << extraDelayMS;
+
+  double endpointBufferSizeMS =
+      10.0 * ((double)bufferLength / (double)_recBlockSize);
+  LOG(LS_VERBOSE) << "[CAPT] endpointBufferSizeMS : " << endpointBufferSizeMS;
+
+  // Start up the capturing stream.
+  //
+  hr = _ptrClientIn->Start();
+  EXIT_ON_ERROR(hr);
+
+  _UnLock();
+
+  // Set event which will ensure that the calling thread modifies the recording
+  // state to true.
+  //
+  SetEvent(_hCaptureStartedEvent);
+
+  // >> ---------------------------- THREAD LOOP ----------------------------
+
+  while (keepRecording) {
+    // Wait for a capture notification event or a shutdown event
+    DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
+    switch (waitResult) {
+      case WAIT_OBJECT_0 + 0:  // _hShutdownCaptureEvent
+        keepRecording = false;
+        break;
+      case WAIT_OBJECT_0 + 1:  // _hCaptureSamplesReadyEvent
+        break;
+      case WAIT_TIMEOUT:  // timeout notification
+        LOG(LS_WARNING) << "capture event timed out after 0.5 seconds";
+        goto Exit;
+      default:  // unexpected error
+        LOG(LS_WARNING) << "unknown wait termination on capture side";
+        goto Exit;
     }
 
-    hr = InitCaptureThreadPriority();
-    if (FAILED(hr))
-    {
-        return hr;
-    }
+    while (keepRecording) {
+      BYTE* pData = 0;
+      UINT32 framesAvailable = 0;
+      DWORD flags = 0;
+      UINT64 recTime = 0;
+      UINT64 recPos = 0;
 
-    _Lock();
+      _Lock();
 
-    // Get size of capturing buffer (length is expressed as the number of audio frames the buffer can hold).
-    // This value is fixed during the capturing session.
-    //
-    UINT32 bufferLength = 0;
-    if (_ptrClientIn == NULL)
-    {
-      LOG(LS_ERROR)
-          << "input state has been modified before capture loop starts.";
-      return 1;
-    }
-    hr = _ptrClientIn->GetBufferSize(&bufferLength);
-    EXIT_ON_ERROR(hr);
-    LOG(LS_VERBOSE) << "[CAPT] size of buffer       : " << bufferLength;
+      // Sanity check to ensure that essential states are not modified
+      // during the unlocked period.
+      if (_ptrCaptureClient == NULL || _ptrClientIn == NULL) {
+        _UnLock();
+        LOG(LS_ERROR) << "input state has been modified during unlocked period";
+        goto Exit;
+      }
 
-    // Allocate memory for sync buffer.
-    // It is used for compensation between native 44.1 and internal 44.0 and
-    // for cases when the capture buffer is larger than 10ms.
-    //
-    const UINT32 syncBufferSize = 2*(bufferLength * _recAudioFrameSize);
-    syncBuffer = new BYTE[syncBufferSize];
-    if (syncBuffer == NULL)
-    {
-        return (DWORD)E_POINTER;
-    }
-    LOG(LS_VERBOSE) << "[CAPT] size of sync buffer  : " << syncBufferSize
-                    << " [bytes]";
+      //  Find out how much capture data is available
+      //
+      hr = _ptrCaptureClient->GetBuffer(
+          &pData,            // packet which is ready to be read by user
+          &framesAvailable,  // #frames in the captured packet (can be zero)
+          &flags,            // support flags (check)
+          &recPos,    // device position of first audio frame in data packet
+          &recTime);  // value of performance counter at the time of recording
+                      // the first audio frame
 
-    // Get maximum latency for the current stream (will not change for the lifetime of the IAudioClient object).
-    //
-    REFERENCE_TIME latency;
-    _ptrClientIn->GetStreamLatency(&latency);
-    LOG(LS_VERBOSE) << "[CAPT] max stream latency   : " << (DWORD)latency
-                    << " (" << (double)(latency / 10000.0) << " ms)";
-
-    // Get the length of the periodic interval separating successive processing passes by
-    // the audio engine on the data in the endpoint buffer.
-    //
-    REFERENCE_TIME devPeriod = 0;
-    REFERENCE_TIME devPeriodMin = 0;
-    _ptrClientIn->GetDevicePeriod(&devPeriod, &devPeriodMin);
-    LOG(LS_VERBOSE) << "[CAPT] device period        : " << (DWORD)devPeriod
-                    << " (" << (double)(devPeriod / 10000.0) << " ms)";
-
-    double extraDelayMS = (double)((latency + devPeriod) / 10000.0);
-    LOG(LS_VERBOSE) << "[CAPT] extraDelayMS         : " << extraDelayMS;
-
-    double endpointBufferSizeMS = 10.0 * ((double)bufferLength / (double)_recBlockSize);
-    LOG(LS_VERBOSE) << "[CAPT] endpointBufferSizeMS : " << endpointBufferSizeMS;
-
-    // Start up the capturing stream.
-    //
-    hr = _ptrClientIn->Start();
-    EXIT_ON_ERROR(hr);
-
-    _UnLock();
-
-    // Set event which will ensure that the calling thread modifies the recording state to true.
-    //
-    SetEvent(_hCaptureStartedEvent);
-
-    // >> ---------------------------- THREAD LOOP ----------------------------
-
-    while (keepRecording)
-    {
-        // Wait for a capture notification event or a shutdown event
-        DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
-        switch (waitResult)
-        {
-        case WAIT_OBJECT_0 + 0:        // _hShutdownCaptureEvent
-            keepRecording = false;
-            break;
-        case WAIT_OBJECT_0 + 1:        // _hCaptureSamplesReadyEvent
-            break;
-        case WAIT_TIMEOUT:            // timeout notification
-            LOG(LS_WARNING) << "capture event timed out after 0.5 seconds";
-            goto Exit;
-        default:                    // unexpected error
-            LOG(LS_WARNING) << "unknown wait termination on capture side";
-            goto Exit;
+      if (SUCCEEDED(hr)) {
+        if (AUDCLNT_S_BUFFER_EMPTY == hr) {
+          // Buffer was empty => start waiting for a new capture notification
+          // event
+          _UnLock();
+          break;
         }
 
-        while (keepRecording)
-        {
-            BYTE *pData = 0;
-            UINT32 framesAvailable = 0;
-            DWORD flags = 0;
-            UINT64 recTime = 0;
-            UINT64 recPos = 0;
+        if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
+          // Treat all of the data in the packet as silence and ignore the
+          // actual data values.
+          LOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_SILENT";
+          pData = NULL;
+        }
 
-            _Lock();
+        assert(framesAvailable != 0);
+
+        if (pData) {
+          CopyMemory(&syncBuffer[syncBufIndex * _recAudioFrameSize], pData,
+                     framesAvailable * _recAudioFrameSize);
+        } else {
+          ZeroMemory(&syncBuffer[syncBufIndex * _recAudioFrameSize],
+                     framesAvailable * _recAudioFrameSize);
+        }
+        assert(syncBufferSize >= (syncBufIndex * _recAudioFrameSize) +
+                                     framesAvailable * _recAudioFrameSize);
+
+        // Release the capture buffer
+        //
+        hr = _ptrCaptureClient->ReleaseBuffer(framesAvailable);
+        EXIT_ON_ERROR(hr);
+
+        _readSamples += framesAvailable;
+        syncBufIndex += framesAvailable;
+
+        QueryPerformanceCounter(&t1);
+
+        // Get the current recording and playout delay.
+        uint32_t sndCardRecDelay = (uint32_t)(
+            ((((UINT64)t1.QuadPart * _perfCounterFactor) - recTime) / 10000) +
+            (10 * syncBufIndex) / _recBlockSize - 10);
+        uint32_t sndCardPlayDelay = static_cast<uint32_t>(_sndCardPlayDelay);
+
+        _sndCardRecDelay = sndCardRecDelay;
+
+        while (syncBufIndex >= _recBlockSize) {
+          if (_ptrAudioBuffer) {
+            _ptrAudioBuffer->SetRecordedBuffer((const int8_t*)syncBuffer,
+                                               _recBlockSize);
+            _ptrAudioBuffer->SetVQEData(sndCardPlayDelay, sndCardRecDelay, 0);
+
+            _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+
+            _UnLock();  // release lock while making the callback
+            _ptrAudioBuffer->DeliverRecordedData();
+            _Lock();  // restore the lock
 
             // Sanity check to ensure that essential states are not modified
-            // during the unlocked period.
-            if (_ptrCaptureClient == NULL || _ptrClientIn == NULL)
-            {
-                _UnLock();
-                LOG(LS_ERROR)
-                    << "input state has been modified during unlocked period";
-                goto Exit;
+            // during the unlocked period
+            if (_ptrCaptureClient == NULL || _ptrClientIn == NULL) {
+              _UnLock();
+              LOG(LS_ERROR) << "input state has been modified during"
+                            << " unlocked period";
+              goto Exit;
             }
+          }
 
-            //  Find out how much capture data is available
-            //
-            hr = _ptrCaptureClient->GetBuffer(&pData,           // packet which is ready to be read by used
-                                              &framesAvailable, // #frames in the captured packet (can be zero)
-                                              &flags,           // support flags (check)
-                                              &recPos,          // device position of first audio frame in data packet
-                                              &recTime);        // value of performance counter at the time of recording the first audio frame
-
-            if (SUCCEEDED(hr))
-            {
-                if (AUDCLNT_S_BUFFER_EMPTY == hr)
-                {
-                    // Buffer was empty => start waiting for a new capture notification event
-                    _UnLock();
-                    break;
-                }
-
-                if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
-                {
-                    // Treat all of the data in the packet as silence and ignore the actual data values.
-                    LOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_SILENT";
-                    pData = NULL;
-                }
-
-                assert(framesAvailable != 0);
-
-                if (pData)
-                {
-                    CopyMemory(&syncBuffer[syncBufIndex*_recAudioFrameSize], pData, framesAvailable*_recAudioFrameSize);
-                }
-                else
-                {
-                    ZeroMemory(&syncBuffer[syncBufIndex*_recAudioFrameSize], framesAvailable*_recAudioFrameSize);
-                }
-                assert(syncBufferSize >= (syncBufIndex*_recAudioFrameSize)+framesAvailable*_recAudioFrameSize);
-
-                // Release the capture buffer
-                //
-                hr = _ptrCaptureClient->ReleaseBuffer(framesAvailable);
-                EXIT_ON_ERROR(hr);
-
-                _readSamples += framesAvailable;
-                syncBufIndex += framesAvailable;
-
-                QueryPerformanceCounter(&t1);
-
-                // Get the current recording and playout delay.
-                uint32_t sndCardRecDelay = (uint32_t)
-                    (((((UINT64)t1.QuadPart * _perfCounterFactor) - recTime)
-                        / 10000) + (10*syncBufIndex) / _recBlockSize - 10);
-                uint32_t sndCardPlayDelay =
-                    static_cast<uint32_t>(_sndCardPlayDelay);
-
-                _sndCardRecDelay = sndCardRecDelay;
-
-                while (syncBufIndex >= _recBlockSize)
-                {
-                    if (_ptrAudioBuffer)
-                    {
-                        _ptrAudioBuffer->SetRecordedBuffer((const int8_t*)syncBuffer, _recBlockSize);
-                        _ptrAudioBuffer->SetVQEData(sndCardPlayDelay,
-                                                    sndCardRecDelay,
-                                                    0);
-
-                        _ptrAudioBuffer->SetTypingStatus(KeyPressed());
-
-                        _UnLock();  // release lock while making the callback
-                        _ptrAudioBuffer->DeliverRecordedData();
-                        _Lock();    // restore the lock
-
-                        // Sanity check to ensure that essential states are not modified during the unlocked period
-                        if (_ptrCaptureClient == NULL || _ptrClientIn == NULL)
-                        {
-                            _UnLock();
-                            LOG(LS_ERROR)
-                                << "input state has been modified during"
-                                << " unlocked period";
-                            goto Exit;
-                        }
-                    }
-
-            // store remaining data that could not be delivered as a 10 ms segment
-                    MoveMemory(&syncBuffer[0], &syncBuffer[_recBlockSize*_recAudioFrameSize], (syncBufIndex-_recBlockSize)*_recAudioFrameSize);
-                    syncBufIndex -= _recBlockSize;
-                    sndCardRecDelay -= 10;
-                }
-
-                if (_AGC)
-                {
-                    uint32_t newMicLevel = _ptrAudioBuffer->NewMicLevel();
-                    if (newMicLevel != 0)
-                    {
-                        // The VQE will only deliver non-zero microphone levels when a change is needed.
-                        // Set this new mic level (received from the observer as return value in the callback).
-                        LOG(LS_VERBOSE) << "AGC change of volume: new="
-                                        << newMicLevel;
-                        // We store this outside of the audio buffer to avoid
-                        // having it overwritten by the getter thread.
-                        _newMicLevel = newMicLevel;
-                        SetEvent(_hSetCaptureVolumeEvent);
-                    }
-                }
-            }
-            else
-            {
-                // If GetBuffer returns AUDCLNT_E_BUFFER_ERROR, the thread consuming the audio samples
-                // must wait for the next processing pass. The client might benefit from keeping a count
-                // of the failed GetBuffer calls. If GetBuffer returns this error repeatedly, the client
-                // can start a new processing loop after shutting down the current client by calling
-                // IAudioClient::Stop, IAudioClient::Reset, and releasing the audio client.
-                LOG(LS_ERROR) << "IAudioCaptureClient::GetBuffer returned"
-                              << " AUDCLNT_E_BUFFER_ERROR, hr = 0x"
-                              << std::hex << hr << std::dec;
-                goto Exit;
-            }
-
-            _UnLock();
+          // store remaining data that could not be delivered as a 10 ms segment
+          MoveMemory(&syncBuffer[0],
+                     &syncBuffer[_recBlockSize * _recAudioFrameSize],
+                     (syncBufIndex - _recBlockSize) * _recAudioFrameSize);
+          syncBufIndex -= _recBlockSize;
+          sndCardRecDelay -= 10;
         }
-    }
 
-    // ---------------------------- THREAD LOOP ---------------------------- <<
+        if (_AGC) {
+          uint32_t newMicLevel = _ptrAudioBuffer->NewMicLevel();
+          if (newMicLevel != 0) {
+            // The VQE will only deliver non-zero microphone levels when a
+            // change is needed. Set this new mic level (received from the
+            // observer as return value in the callback).
+            LOG(LS_VERBOSE) << "AGC change of volume: new=" << newMicLevel;
+            // We store this outside of the audio buffer to avoid
+            // having it overwritten by the getter thread.
+            _newMicLevel = newMicLevel;
+            SetEvent(_hSetCaptureVolumeEvent);
+          }
+        }
+      } else {
+        // If GetBuffer returns AUDCLNT_E_BUFFER_ERROR, the thread consuming the
+        // audio samples must wait for the next processing pass. The client
+        // might benefit from keeping a count of the failed GetBuffer calls. If
+        // GetBuffer returns this error repeatedly, the client can start a new
+        // processing loop after shutting down the current client by calling
+        // IAudioClient::Stop, IAudioClient::Reset, and releasing the audio
+        // client.
+        LOG(LS_ERROR) << "IAudioCaptureClient::GetBuffer returned"
+                      << " AUDCLNT_E_BUFFER_ERROR, hr = 0x" << std::hex << hr
+                      << std::dec;
+        goto Exit;
+      }
 
-    if (_ptrClientIn)
-    {
-        hr = _ptrClientIn->Stop();
+      _UnLock();
     }
+  }
+
+  // ---------------------------- THREAD LOOP ---------------------------- <<
+
+  if (_ptrClientIn) {
+    hr = _ptrClientIn->Stop();
+  }
 
 Exit:
-    if (FAILED(hr))
-    {
-        _ptrClientIn->Stop();
-        _UnLock();
-        _TraceCOMError(hr);
-    }
-
-    RevertCaptureThreadPriority();
-
-    _Lock();
-
-    if (keepRecording)
-    {
-        if (_ptrClientIn != NULL)
-        {
-            hr = _ptrClientIn->Stop();
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-            }
-            hr = _ptrClientIn->Reset();
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-            }
-        }
-
-        LOG(LS_ERROR)
-            << "Recording error: capturing thread has ended pre-maturely";
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "_Capturing thread is now terminated properly";
-    }
-
-    SAFE_RELEASE(_ptrClientIn);
-    SAFE_RELEASE(_ptrCaptureClient);
-
+  if (FAILED(hr)) {
+    _ptrClientIn->Stop();
     _UnLock();
+    _TraceCOMError(hr);
+  }
 
-    if (syncBuffer)
-    {
-        delete [] syncBuffer;
+  RevertCaptureThreadPriority();
+
+  _Lock();
+
+  if (keepRecording) {
+    if (_ptrClientIn != NULL) {
+      hr = _ptrClientIn->Stop();
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+      }
+      hr = _ptrClientIn->Reset();
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+      }
     }
 
-    return (DWORD)hr;
+    LOG(LS_ERROR) << "Recording error: capturing thread has ended pre-maturely";
+  } else {
+    LOG(LS_VERBOSE) << "_Capturing thread is now terminated properly";
+  }
+
+  SAFE_RELEASE(_ptrClientIn);
+  SAFE_RELEASE(_ptrCaptureClient);
+
+  _UnLock();
+
+  if (syncBuffer) {
+    delete[] syncBuffer;
+  }
+
+  return (DWORD)hr;
 }
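
For reference, the capture loop above accumulates WASAPI packets of arbitrary
size in syncBuffer and only hands data to the audio buffer in whole 10 ms
blocks of _recBlockSize frames, shifting any remainder back to the front of
the buffer. A minimal sketch of that accumulation pattern, using a std::vector
and hypothetical names instead of the raw buffer and the Win32 MoveMemory used
here:

#include <cstdint>
#include <cstring>
#include <vector>

// Sketch only: append |frames| captured frames (or silence when |data| is
// null, as for AUDCLNT_BUFFERFLAGS_SILENT) and deliver complete blocks of
// |blockFrames| frames each; |frameSize| is the size of one frame in bytes.
class CaptureSyncBufferSketch {
 public:
  CaptureSyncBufferSketch(size_t blockFrames, size_t frameSize)
      : blockFrames_(blockFrames), frameSize_(frameSize) {}

  template <typename DeliverFn>
  void Append(const uint8_t* data, size_t frames, DeliverFn deliver) {
    if (frames == 0)
      return;  // The real code asserts framesAvailable != 0.
    const size_t oldSize = buffer_.size();
    buffer_.resize(oldSize + frames * frameSize_);
    if (data) {
      memcpy(&buffer_[oldSize], data, frames * frameSize_);
    } else {
      memset(&buffer_[oldSize], 0, frames * frameSize_);  // Silent packet.
    }
    // Deliver as many complete blocks as are available, then keep the rest
    // (the MoveMemory step in the real code).
    const size_t blockBytes = blockFrames_ * frameSize_;
    size_t offset = 0;
    while (buffer_.size() - offset >= blockBytes) {
      deliver(&buffer_[offset], blockFrames_);
      offset += blockBytes;
    }
    buffer_.erase(buffer_.begin(), buffer_.begin() + offset);
  }

 private:
  const size_t blockFrames_;
  const size_t frameSize_;
  std::vector<uint8_t> buffer_;
};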
 
-int32_t AudioDeviceWindowsCore::EnableBuiltInAEC(bool enable)
-{
+int32_t AudioDeviceWindowsCore::EnableBuiltInAEC(bool enable) {
+  if (_recIsInitialized) {
+    LOG(LS_ERROR)
+        << "Attempt to set Windows AEC with recording already initialized";
+    return -1;
+  }
 
-    if (_recIsInitialized)
-    {
-        LOG(LS_ERROR)
-            << "Attempt to set Windows AEC with recording already initialized";
-        return -1;
-    }
+  if (_dmo == NULL) {
+    LOG(LS_ERROR)
+        << "Built-in AEC DMO was not initialized properly at create time";
+    return -1;
+  }
 
-    if (_dmo == NULL)
-    {
-        LOG(LS_ERROR)
-            << "Built-in AEC DMO was not initialized properly at create time";
-        return -1;
-    }
-
-    _builtInAecEnabled = enable;
-    return 0;
+  _builtInAecEnabled = enable;
+  return 0;
 }
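
The early return above means the built-in AEC can only be toggled while
recording is uninitialized. A hedged usage sketch of the resulting call order
on an already-created AudioDeviceModule (adm is an assumed pointer; error
handling reduced to the essentials):

// Sketch: enable the Windows built-in AEC before InitRecording(); once
// recording has been initialized, EnableBuiltInAEC() returns -1 as above.
if (adm->EnableBuiltInAEC(true) != 0) {
  // DMO unavailable or recording already initialized; fall back to the
  // software echo canceller.
}
if (adm->InitRecording() == 0) {
  adm->StartRecording();
}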
 
-int AudioDeviceWindowsCore::SetDMOProperties()
-{
-    HRESULT hr = S_OK;
-    assert(_dmo != NULL);
+int AudioDeviceWindowsCore::SetDMOProperties() {
+  HRESULT hr = S_OK;
+  assert(_dmo != NULL);
 
-    rtc::scoped_refptr<IPropertyStore> ps;
-    {
-        IPropertyStore* ptrPS = NULL;
-        hr = _dmo->QueryInterface(IID_IPropertyStore,
-                                  reinterpret_cast<void**>(&ptrPS));
-        if (FAILED(hr) || ptrPS == NULL)
-        {
-            _TraceCOMError(hr);
-            return -1;
-        }
-        ps = ptrPS;
-        SAFE_RELEASE(ptrPS);
+  rtc::scoped_refptr<IPropertyStore> ps;
+  {
+    IPropertyStore* ptrPS = NULL;
+    hr = _dmo->QueryInterface(IID_IPropertyStore,
+                              reinterpret_cast<void**>(&ptrPS));
+    if (FAILED(hr) || ptrPS == NULL) {
+      _TraceCOMError(hr);
+      return -1;
+    }
+    ps = ptrPS;
+    SAFE_RELEASE(ptrPS);
+  }
+
+  // Set the AEC system mode.
+  // SINGLE_CHANNEL_AEC - AEC processing only.
+  if (SetVtI4Property(ps, MFPKEY_WMAAECMA_SYSTEM_MODE, SINGLE_CHANNEL_AEC)) {
+    return -1;
+  }
+
+  // Set the AEC source mode.
+  // VARIANT_TRUE - Source mode (we poll the AEC for captured data).
+  if (SetBoolProperty(ps, MFPKEY_WMAAECMA_DMO_SOURCE_MODE, VARIANT_TRUE) ==
+      -1) {
+    return -1;
+  }
+
+  // Enable the feature mode.
+  // This lets us override all the default processing settings below.
+  if (SetBoolProperty(ps, MFPKEY_WMAAECMA_FEATURE_MODE, VARIANT_TRUE) == -1) {
+    return -1;
+  }
+
+  // Disable analog AGC (default enabled).
+  if (SetBoolProperty(ps, MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER, VARIANT_FALSE) ==
+      -1) {
+    return -1;
+  }
+
+  // Disable noise suppression (default enabled).
+  // 0 - Disabled, 1 - Enabled
+  if (SetVtI4Property(ps, MFPKEY_WMAAECMA_FEATR_NS, 0) == -1) {
+    return -1;
+  }
+
+  // Relevant parameters to leave at default settings:
+  // MFPKEY_WMAAECMA_FEATR_AGC - Digital AGC (disabled).
+  // MFPKEY_WMAAECMA_FEATR_CENTER_CLIP - AEC center clipping (enabled).
+  // MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH - Filter length (256 ms).
+  //   TODO(andrew): investigate decreasing the length to 128 ms.
+  // MFPKEY_WMAAECMA_FEATR_FRAME_SIZE - Frame size (0).
+  //   0 is automatic; defaults to 160 samples (or 10 ms frames at the
+  //   selected 16 kHz) as long as mic array processing is disabled.
+  // MFPKEY_WMAAECMA_FEATR_NOISE_FILL - Comfort noise (enabled).
+  // MFPKEY_WMAAECMA_FEATR_VAD - VAD (disabled).
+
+  // Set the devices selected by VoE. If using a default device, we need to
+  // search for the device index.
+  int inDevIndex = _inputDeviceIndex;
+  int outDevIndex = _outputDeviceIndex;
+  if (!_usingInputDeviceIndex) {
+    ERole role = eCommunications;
+    if (_inputDevice == AudioDeviceModule::kDefaultDevice) {
+      role = eConsole;
     }
 
-    // Set the AEC system mode.
-    // SINGLE_CHANNEL_AEC - AEC processing only.
-    if (SetVtI4Property(ps,
-                        MFPKEY_WMAAECMA_SYSTEM_MODE,
-                        SINGLE_CHANNEL_AEC))
-    {
-        return -1;
+    if (_GetDefaultDeviceIndex(eCapture, role, &inDevIndex) == -1) {
+      return -1;
+    }
+  }
+
+  if (!_usingOutputDeviceIndex) {
+    ERole role = eCommunications;
+    if (_outputDevice == AudioDeviceModule::kDefaultDevice) {
+      role = eConsole;
     }
 
-    // Set the AEC source mode.
-    // VARIANT_TRUE - Source mode (we poll the AEC for captured data).
-    if (SetBoolProperty(ps,
-                        MFPKEY_WMAAECMA_DMO_SOURCE_MODE,
-                        VARIANT_TRUE) == -1)
-    {
-        return -1;
+    if (_GetDefaultDeviceIndex(eRender, role, &outDevIndex) == -1) {
+      return -1;
     }
+  }
 
-    // Enable the feature mode.
-    // This lets us override all the default processing settings below.
-    if (SetBoolProperty(ps,
-                        MFPKEY_WMAAECMA_FEATURE_MODE,
-                        VARIANT_TRUE) == -1)
-    {
-        return -1;
-    }
+  DWORD devIndex = static_cast<uint32_t>(outDevIndex << 16) +
+                   static_cast<uint32_t>(0x0000ffff & inDevIndex);
+  LOG(LS_VERBOSE) << "Capture device index: " << inDevIndex
+                  << ", render device index: " << outDevIndex;
+  if (SetVtI4Property(ps, MFPKEY_WMAAECMA_DEVICE_INDEXES, devIndex) == -1) {
+    return -1;
+  }
 
-    // Disable analog AGC (default enabled).
-    if (SetBoolProperty(ps,
-                        MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER,
-                        VARIANT_FALSE) == -1)
-    {
-        return -1;
-    }
-
-    // Disable noise suppression (default enabled).
-    // 0 - Disabled, 1 - Enabled
-    if (SetVtI4Property(ps,
-                        MFPKEY_WMAAECMA_FEATR_NS,
-                        0) == -1)
-    {
-        return -1;
-    }
-
-    // Relevant parameters to leave at default settings:
-    // MFPKEY_WMAAECMA_FEATR_AGC - Digital AGC (disabled).
-    // MFPKEY_WMAAECMA_FEATR_CENTER_CLIP - AEC center clipping (enabled).
-    // MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH - Filter length (256 ms).
-    //   TODO(andrew): investigate decreasing the length to 128 ms.
-    // MFPKEY_WMAAECMA_FEATR_FRAME_SIZE - Frame size (0).
-    //   0 is automatic; defaults to 160 samples (or 10 ms frames at the
-    //   selected 16 kHz) as long as mic array processing is disabled.
-    // MFPKEY_WMAAECMA_FEATR_NOISE_FILL - Comfort noise (enabled).
-    // MFPKEY_WMAAECMA_FEATR_VAD - VAD (disabled).
-
-    // Set the devices selected by VoE. If using a default device, we need to
-    // search for the device index.
-    int inDevIndex = _inputDeviceIndex;
-    int outDevIndex = _outputDeviceIndex;
-    if (!_usingInputDeviceIndex)
-    {
-        ERole role = eCommunications;
-        if (_inputDevice == AudioDeviceModule::kDefaultDevice)
-        {
-            role = eConsole;
-        }
-
-        if (_GetDefaultDeviceIndex(eCapture, role, &inDevIndex) == -1)
-        {
-            return -1;
-        }
-    }
-
-    if (!_usingOutputDeviceIndex)
-    {
-        ERole role = eCommunications;
-        if (_outputDevice == AudioDeviceModule::kDefaultDevice)
-        {
-            role = eConsole;
-        }
-
-        if (_GetDefaultDeviceIndex(eRender, role, &outDevIndex) == -1)
-        {
-            return -1;
-        }
-    }
-
-    DWORD devIndex = static_cast<uint32_t>(outDevIndex << 16) +
-                     static_cast<uint32_t>(0x0000ffff & inDevIndex);
-    LOG(LS_VERBOSE) << "Capture device index: " << inDevIndex
-                    << ", render device index: " << outDevIndex;
-    if (SetVtI4Property(ps,
-                        MFPKEY_WMAAECMA_DEVICE_INDEXES,
-                        devIndex) == -1)
-    {
-        return -1;
-    }
-
-    return 0;
+  return 0;
 }
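
The MFPKEY_WMAAECMA_DEVICE_INDEXES value assembled above packs both endpoints
into a single 32-bit word: the render device index in the high 16 bits and the
capture device index in the low 16 bits. A small sketch of that packing and
its inverse (the helper names are hypothetical):

#include <cstdint>

// Pack render (output) and capture (input) device indexes the way the AEC DMO
// expects them: render index in the high word, capture index in the low word.
uint32_t PackAecDeviceIndexes(int renderIndex, int captureIndex) {
  return (static_cast<uint32_t>(renderIndex) << 16) |
         (static_cast<uint32_t>(captureIndex) & 0x0000ffff);
}

// Recover the two indexes from a packed value.
void UnpackAecDeviceIndexes(uint32_t packed, int* renderIndex,
                            int* captureIndex) {
  *renderIndex = static_cast<int>(packed >> 16);
  *captureIndex = static_cast<int>(packed & 0x0000ffff);
}

// Example: render device 1 and capture device 2 pack to 0x00010002.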
 
 int AudioDeviceWindowsCore::SetBoolProperty(IPropertyStore* ptrPS,
                                             REFPROPERTYKEY key,
-                                            VARIANT_BOOL value)
-{
-    PROPVARIANT pv;
-    PropVariantInit(&pv);
-    pv.vt = VT_BOOL;
-    pv.boolVal = value;
-    HRESULT hr = ptrPS->SetValue(key, pv);
-    PropVariantClear(&pv);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        return -1;
-    }
-    return 0;
+                                            VARIANT_BOOL value) {
+  PROPVARIANT pv;
+  PropVariantInit(&pv);
+  pv.vt = VT_BOOL;
+  pv.boolVal = value;
+  HRESULT hr = ptrPS->SetValue(key, pv);
+  PropVariantClear(&pv);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
+  return 0;
 }
 
 int AudioDeviceWindowsCore::SetVtI4Property(IPropertyStore* ptrPS,
                                             REFPROPERTYKEY key,
-                                            LONG value)
-{
-    PROPVARIANT pv;
-    PropVariantInit(&pv);
-    pv.vt = VT_I4;
-    pv.lVal = value;
-    HRESULT hr = ptrPS->SetValue(key, pv);
-    PropVariantClear(&pv);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        return -1;
-    }
-    return 0;
+                                            LONG value) {
+  PROPVARIANT pv;
+  PropVariantInit(&pv);
+  pv.vt = VT_I4;
+  pv.lVal = value;
+  HRESULT hr = ptrPS->SetValue(key, pv);
+  PropVariantClear(&pv);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -4121,40 +3664,33 @@
 //  such devices.
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_RefreshDeviceList(EDataFlow dir)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_RefreshDeviceList(EDataFlow dir) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    IMMDeviceCollection *pCollection = NULL;
+  HRESULT hr = S_OK;
+  IMMDeviceCollection* pCollection = NULL;
 
-    assert(dir == eRender || dir == eCapture);
-    assert(_ptrEnumerator != NULL);
+  assert(dir == eRender || dir == eCapture);
+  assert(_ptrEnumerator != NULL);
 
-    // Create a fresh list of devices using the specified direction
-    hr = _ptrEnumerator->EnumAudioEndpoints(
-                           dir,
-                           DEVICE_STATE_ACTIVE,
-                           &pCollection);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pCollection);
-        return -1;
-    }
+  // Create a fresh list of devices using the specified direction
+  hr = _ptrEnumerator->EnumAudioEndpoints(dir, DEVICE_STATE_ACTIVE,
+                                          &pCollection);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    SAFE_RELEASE(pCollection);
+    return -1;
+  }
 
-    if (dir == eRender)
-    {
-        SAFE_RELEASE(_ptrRenderCollection);
-        _ptrRenderCollection = pCollection;
-    }
-    else
-    {
-        SAFE_RELEASE(_ptrCaptureCollection);
-        _ptrCaptureCollection = pCollection;
-    }
+  if (dir == eRender) {
+    SAFE_RELEASE(_ptrRenderCollection);
+    _ptrRenderCollection = pCollection;
+  } else {
+    SAFE_RELEASE(_ptrCaptureCollection);
+    _ptrCaptureCollection = pCollection;
+  }
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -4164,31 +3700,26 @@
 //  current list of such devices.
 // ----------------------------------------------------------------------------
 
-int16_t AudioDeviceWindowsCore::_DeviceListCount(EDataFlow dir)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int16_t AudioDeviceWindowsCore::_DeviceListCount(EDataFlow dir) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    UINT count = 0;
+  HRESULT hr = S_OK;
+  UINT count = 0;
 
-    assert(eRender == dir || eCapture == dir);
+  assert(eRender == dir || eCapture == dir);
 
-    if (eRender == dir && NULL != _ptrRenderCollection)
-    {
-        hr = _ptrRenderCollection->GetCount(&count);
-    }
-    else if (NULL != _ptrCaptureCollection)
-    {
-        hr = _ptrCaptureCollection->GetCount(&count);
-    }
+  if (eRender == dir && NULL != _ptrRenderCollection) {
+    hr = _ptrRenderCollection->GetCount(&count);
+  } else if (NULL != _ptrCaptureCollection) {
+    hr = _ptrCaptureCollection->GetCount(&count);
+  }
 
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        return -1;
-    }
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
 
-    return static_cast<int16_t> (count);
+  return static_cast<int16_t>(count);
 }
 
 // ----------------------------------------------------------------------------
@@ -4202,34 +3733,32 @@
 //  in _RefreshDeviceList().
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetListDeviceName(EDataFlow dir, int index, LPWSTR szBuffer, int bufferLen)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_GetListDeviceName(EDataFlow dir,
+                                                   int index,
+                                                   LPWSTR szBuffer,
+                                                   int bufferLen) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    IMMDevice *pDevice = NULL;
+  HRESULT hr = S_OK;
+  IMMDevice* pDevice = NULL;
 
-    assert(dir == eRender || dir == eCapture);
+  assert(dir == eRender || dir == eCapture);
 
-    if (eRender == dir && NULL != _ptrRenderCollection)
-    {
-        hr = _ptrRenderCollection->Item(index, &pDevice);
-    }
-    else if (NULL != _ptrCaptureCollection)
-    {
-        hr = _ptrCaptureCollection->Item(index, &pDevice);
-    }
+  if (eRender == dir && NULL != _ptrRenderCollection) {
+    hr = _ptrRenderCollection->Item(index, &pDevice);
+  } else if (NULL != _ptrCaptureCollection) {
+    hr = _ptrCaptureCollection->Item(index, &pDevice);
+  }
 
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pDevice);
-        return -1;
-    }
-
-    int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(pDevice);
-    return res;
+    return -1;
+  }
+
+  int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
+  SAFE_RELEASE(pDevice);
+  return res;
 }
 
 // ----------------------------------------------------------------------------
@@ -4241,32 +3770,30 @@
 //  Uses: _ptrEnumerator
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetDefaultDeviceName(EDataFlow dir, ERole role, LPWSTR szBuffer, int bufferLen)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_GetDefaultDeviceName(EDataFlow dir,
+                                                      ERole role,
+                                                      LPWSTR szBuffer,
+                                                      int bufferLen) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    IMMDevice *pDevice = NULL;
+  HRESULT hr = S_OK;
+  IMMDevice* pDevice = NULL;
 
-    assert(dir == eRender || dir == eCapture);
-    assert(role == eConsole || role == eCommunications);
-    assert(_ptrEnumerator != NULL);
+  assert(dir == eRender || dir == eCapture);
+  assert(role == eConsole || role == eCommunications);
+  assert(_ptrEnumerator != NULL);
 
-    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
-                           dir,
-                           role,
-                           &pDevice);
+  hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice);
 
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pDevice);
-        return -1;
-    }
-
-    int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(pDevice);
-    return res;
+    return -1;
+  }
+
+  int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
+  SAFE_RELEASE(pDevice);
+  return res;
 }
 
 // ----------------------------------------------------------------------------
@@ -4280,34 +3807,32 @@
 //  in _RefreshDeviceList().
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetListDeviceID(EDataFlow dir, int index, LPWSTR szBuffer, int bufferLen)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_GetListDeviceID(EDataFlow dir,
+                                                 int index,
+                                                 LPWSTR szBuffer,
+                                                 int bufferLen) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    IMMDevice *pDevice = NULL;
+  HRESULT hr = S_OK;
+  IMMDevice* pDevice = NULL;
 
-    assert(dir == eRender || dir == eCapture);
+  assert(dir == eRender || dir == eCapture);
 
-    if (eRender == dir && NULL != _ptrRenderCollection)
-    {
-        hr = _ptrRenderCollection->Item(index, &pDevice);
-    }
-    else if (NULL != _ptrCaptureCollection)
-    {
-        hr = _ptrCaptureCollection->Item(index, &pDevice);
-    }
+  if (eRender == dir && NULL != _ptrRenderCollection) {
+    hr = _ptrRenderCollection->Item(index, &pDevice);
+  } else if (NULL != _ptrCaptureCollection) {
+    hr = _ptrCaptureCollection->Item(index, &pDevice);
+  }
 
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pDevice);
-        return -1;
-    }
-
-    int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(pDevice);
-    return res;
+    return -1;
+  }
+
+  int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
+  SAFE_RELEASE(pDevice);
+  return res;
 }
 
 // ----------------------------------------------------------------------------
@@ -4319,114 +3844,99 @@
 //  Uses: _ptrEnumerator
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetDefaultDeviceID(EDataFlow dir, ERole role, LPWSTR szBuffer, int bufferLen)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_GetDefaultDeviceID(EDataFlow dir,
+                                                    ERole role,
+                                                    LPWSTR szBuffer,
+                                                    int bufferLen) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    IMMDevice *pDevice = NULL;
+  HRESULT hr = S_OK;
+  IMMDevice* pDevice = NULL;
 
-    assert(dir == eRender || dir == eCapture);
-    assert(role == eConsole || role == eCommunications);
-    assert(_ptrEnumerator != NULL);
+  assert(dir == eRender || dir == eCapture);
+  assert(role == eConsole || role == eCommunications);
+  assert(_ptrEnumerator != NULL);
 
-    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
-                           dir,
-                           role,
-                           &pDevice);
+  hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice);
 
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pDevice);
-        return -1;
-    }
-
-    int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(pDevice);
-    return res;
+    return -1;
+  }
+
+  int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
+  SAFE_RELEASE(pDevice);
+  return res;
 }
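
The device helpers above all follow the same manual pattern: acquire a COM
interface, bail out through _TraceCOMError() on failure, and SAFE_RELEASE on
every exit path. A hedged sketch of the _GetDefaultDeviceID logic written with
Microsoft::WRL::ComPtr instead (not the style used in this file, which relies
on SAFE_RELEASE and rtc::scoped_refptr), just to illustrate what those
releases are guarding:

#include <mmdeviceapi.h>
#include <wchar.h>
#include <wrl/client.h>  // Microsoft::WRL::ComPtr

using Microsoft::WRL::ComPtr;

// Sketch only: fetch the default endpoint ID without manual SAFE_RELEASE.
// |enumerator| plays the role of _ptrEnumerator in the class above.
HRESULT GetDefaultEndpointId(IMMDeviceEnumerator* enumerator,
                             EDataFlow dir,
                             ERole role,
                             LPWSTR buffer,
                             int bufferLen) {
  ComPtr<IMMDevice> device;
  HRESULT hr = enumerator->GetDefaultAudioEndpoint(dir, role, &device);
  if (FAILED(hr))
    return hr;
  LPWSTR id = NULL;
  hr = device->GetId(&id);
  if (FAILED(hr))
    return hr;
  wcsncpy_s(buffer, bufferLen, id, _TRUNCATE);
  CoTaskMemFree(id);
  return S_OK;  // |device| is released automatically on scope exit.
}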
 
 int32_t AudioDeviceWindowsCore::_GetDefaultDeviceIndex(EDataFlow dir,
                                                        ERole role,
-                                                       int* index)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+                                                       int* index) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    WCHAR szDefaultDeviceID[MAX_PATH] = {0};
-    WCHAR szDeviceID[MAX_PATH] = {0};
+  HRESULT hr = S_OK;
+  WCHAR szDefaultDeviceID[MAX_PATH] = {0};
+  WCHAR szDeviceID[MAX_PATH] = {0};
 
-    const size_t kDeviceIDLength = sizeof(szDeviceID)/sizeof(szDeviceID[0]);
-    assert(kDeviceIDLength ==
-        sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0]));
+  const size_t kDeviceIDLength = sizeof(szDeviceID) / sizeof(szDeviceID[0]);
+  assert(kDeviceIDLength ==
+         sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0]));
 
-    if (_GetDefaultDeviceID(dir,
-                            role,
-                            szDefaultDeviceID,
-                            kDeviceIDLength) == -1)
+  if (_GetDefaultDeviceID(dir, role, szDefaultDeviceID, kDeviceIDLength) ==
+      -1) {
+    return -1;
+  }
+
+  IMMDeviceCollection* collection = _ptrCaptureCollection;
+  if (dir == eRender) {
+    collection = _ptrRenderCollection;
+  }
+
+  if (!collection) {
+    LOG(LS_ERROR) << "Device collection not valid";
+    return -1;
+  }
+
+  UINT count = 0;
+  hr = collection->GetCount(&count);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
+
+  *index = -1;
+  for (UINT i = 0; i < count; i++) {
+    memset(szDeviceID, 0, sizeof(szDeviceID));
+    rtc::scoped_refptr<IMMDevice> device;
     {
-        return -1;
-    }
-
-    IMMDeviceCollection* collection = _ptrCaptureCollection;
-    if (dir == eRender)
-    {
-        collection = _ptrRenderCollection;
-    }
-
-    if (!collection)
-    {
-        LOG(LS_ERROR) << "Device collection not valid";
-        return -1;
-    }
-
-    UINT count = 0;
-    hr = collection->GetCount(&count);
-    if (FAILED(hr))
-    {
+      IMMDevice* ptrDevice = NULL;
+      hr = collection->Item(i, &ptrDevice);
+      if (FAILED(hr) || ptrDevice == NULL) {
         _TraceCOMError(hr);
         return -1;
+      }
+      device = ptrDevice;
+      SAFE_RELEASE(ptrDevice);
     }
 
-    *index = -1;
-    for (UINT i = 0; i < count; i++)
-    {
-        memset(szDeviceID, 0, sizeof(szDeviceID));
-        rtc::scoped_refptr<IMMDevice> device;
-        {
-            IMMDevice* ptrDevice = NULL;
-            hr = collection->Item(i, &ptrDevice);
-            if (FAILED(hr) || ptrDevice == NULL)
-            {
-                _TraceCOMError(hr);
-                return -1;
-            }
-            device = ptrDevice;
-            SAFE_RELEASE(ptrDevice);
-        }
-
-        if (_GetDeviceID(device, szDeviceID, kDeviceIDLength) == -1)
-        {
-           return -1;
-        }
-
-        if (wcsncmp(szDefaultDeviceID, szDeviceID, kDeviceIDLength) == 0)
-        {
-            // Found a match.
-            *index = i;
-            break;
-        }
-
+    if (_GetDeviceID(device, szDeviceID, kDeviceIDLength) == -1) {
+      return -1;
     }
 
-    if (*index == -1)
-    {
-        LOG(LS_ERROR) << "Unable to find collection index for default device";
-        return -1;
+    if (wcsncmp(szDefaultDeviceID, szDeviceID, kDeviceIDLength) == 0) {
+      // Found a match.
+      *index = i;
+      break;
     }
+  }
 
-    return 0;
+  if (*index == -1) {
+    LOG(LS_ERROR) << "Unable to find collection index for default device";
+    return -1;
+  }
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -4435,401 +3945,363 @@
 
 int32_t AudioDeviceWindowsCore::_GetDeviceName(IMMDevice* pDevice,
                                                LPWSTR pszBuffer,
-                                               int bufferLen)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+                                               int bufferLen) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    static const WCHAR szDefault[] = L"<Device not available>";
+  static const WCHAR szDefault[] = L"<Device not available>";
 
-    HRESULT hr = E_FAIL;
-    IPropertyStore *pProps = NULL;
-    PROPVARIANT varName;
+  HRESULT hr = E_FAIL;
+  IPropertyStore* pProps = NULL;
+  PROPVARIANT varName;
 
-    assert(pszBuffer != NULL);
-    assert(bufferLen > 0);
+  assert(pszBuffer != NULL);
+  assert(bufferLen > 0);
 
-    if (pDevice != NULL)
-    {
-        hr = pDevice->OpenPropertyStore(STGM_READ, &pProps);
-        if (FAILED(hr))
-        {
-            LOG(LS_ERROR) << "IMMDevice::OpenPropertyStore failed, hr = 0x"
-                          << std::hex << hr << std::dec;
-        }
+  if (pDevice != NULL) {
+    hr = pDevice->OpenPropertyStore(STGM_READ, &pProps);
+    if (FAILED(hr)) {
+      LOG(LS_ERROR) << "IMMDevice::OpenPropertyStore failed, hr = 0x"
+                    << std::hex << hr << std::dec;
     }
+  }
 
-    // Initialize container for property value.
-    PropVariantInit(&varName);
+  // Initialize container for property value.
+  PropVariantInit(&varName);
 
-    if (SUCCEEDED(hr))
-    {
-        // Get the endpoint device's friendly-name property.
-        hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
-        if (FAILED(hr))
-        {
-            LOG(LS_ERROR) << "IPropertyStore::GetValue failed, hr = 0x"
-                          << std::hex << hr << std::dec;
-        }
+  if (SUCCEEDED(hr)) {
+    // Get the endpoint device's friendly-name property.
+    hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
+    if (FAILED(hr)) {
+      LOG(LS_ERROR) << "IPropertyStore::GetValue failed, hr = 0x" << std::hex
+                    << hr << std::dec;
     }
+  }
 
-    if ((SUCCEEDED(hr)) && (VT_EMPTY == varName.vt))
-    {
-        hr = E_FAIL;
-            LOG(LS_ERROR) << "IPropertyStore::GetValue returned no value,"
-                          << " hr = 0x" << std::hex << hr << std::dec;
-    }
+  if ((SUCCEEDED(hr)) && (VT_EMPTY == varName.vt)) {
+    hr = E_FAIL;
+    LOG(LS_ERROR) << "IPropertyStore::GetValue returned no value,"
+                  << " hr = 0x" << std::hex << hr << std::dec;
+  }
 
-    if ((SUCCEEDED(hr)) && (VT_LPWSTR != varName.vt))
-    {
-        // The returned value is not a wide null terminated string.
-        hr = E_UNEXPECTED;
-            LOG(LS_ERROR) << "IPropertyStore::GetValue returned unexpected"
-                          << " type, hr = 0x" << std::hex << hr << std::dec;
-    }
+  if ((SUCCEEDED(hr)) && (VT_LPWSTR != varName.vt)) {
+    // The returned value is not a wide null terminated string.
+    hr = E_UNEXPECTED;
+    LOG(LS_ERROR) << "IPropertyStore::GetValue returned unexpected"
+                  << " type, hr = 0x" << std::hex << hr << std::dec;
+  }
 
-    if (SUCCEEDED(hr) && (varName.pwszVal != NULL))
-    {
-        // Copy the valid device name to the provided output buffer.
-        wcsncpy_s(pszBuffer, bufferLen, varName.pwszVal, _TRUNCATE);
-    }
-    else
-    {
-        // Failed to find the device name.
-        wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
-    }
+  if (SUCCEEDED(hr) && (varName.pwszVal != NULL)) {
+    // Copy the valid device name to the provided output buffer.
+    wcsncpy_s(pszBuffer, bufferLen, varName.pwszVal, _TRUNCATE);
+  } else {
+    // Failed to find the device name.
+    wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
+  }
 
-    PropVariantClear(&varName);
-    SAFE_RELEASE(pProps);
+  PropVariantClear(&varName);
+  SAFE_RELEASE(pProps);
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  _GetDeviceID
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetDeviceID(IMMDevice* pDevice, LPWSTR pszBuffer, int bufferLen)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_GetDeviceID(IMMDevice* pDevice,
+                                             LPWSTR pszBuffer,
+                                             int bufferLen) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    static const WCHAR szDefault[] = L"<Device not available>";
+  static const WCHAR szDefault[] = L"<Device not available>";
 
-    HRESULT hr = E_FAIL;
-    LPWSTR pwszID = NULL;
+  HRESULT hr = E_FAIL;
+  LPWSTR pwszID = NULL;
 
-    assert(pszBuffer != NULL);
-    assert(bufferLen > 0);
+  assert(pszBuffer != NULL);
+  assert(bufferLen > 0);
 
-    if (pDevice != NULL)
-    {
-        hr = pDevice->GetId(&pwszID);
-    }
+  if (pDevice != NULL) {
+    hr = pDevice->GetId(&pwszID);
+  }
 
-    if (hr == S_OK)
-    {
-        // Found the device ID.
-        wcsncpy_s(pszBuffer, bufferLen, pwszID, _TRUNCATE);
-    }
-    else
-    {
-        // Failed to find the device ID.
-        wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
-    }
+  if (hr == S_OK) {
+    // Found the device ID.
+    wcsncpy_s(pszBuffer, bufferLen, pwszID, _TRUNCATE);
+  } else {
+    // Failed to find the device ID.
+    wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
+  }
 
-    CoTaskMemFree(pwszID);
-    return 0;
+  CoTaskMemFree(pwszID);
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  _GetDefaultDevice
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetDefaultDevice(EDataFlow dir, ERole role, IMMDevice** ppDevice)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_GetDefaultDevice(EDataFlow dir,
+                                                  ERole role,
+                                                  IMMDevice** ppDevice) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr(S_OK);
+  HRESULT hr(S_OK);
 
-    assert(_ptrEnumerator != NULL);
+  assert(_ptrEnumerator != NULL);
 
-    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
-                                   dir,
-                                   role,
-                                   ppDevice);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        return -1;
-    }
+  hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, ppDevice);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  _GetListDevice
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetListDevice(EDataFlow dir, int index, IMMDevice** ppDevice)
-{
-    HRESULT hr(S_OK);
+int32_t AudioDeviceWindowsCore::_GetListDevice(EDataFlow dir,
+                                               int index,
+                                               IMMDevice** ppDevice) {
+  HRESULT hr(S_OK);
 
-    assert(_ptrEnumerator != NULL);
+  assert(_ptrEnumerator != NULL);
 
-    IMMDeviceCollection *pCollection = NULL;
+  IMMDeviceCollection* pCollection = NULL;
 
-    hr = _ptrEnumerator->EnumAudioEndpoints(
-                               dir,
-                               DEVICE_STATE_ACTIVE,        // only active endpoints are OK
-                               &pCollection);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pCollection);
-        return -1;
-    }
+  hr = _ptrEnumerator->EnumAudioEndpoints(
+      dir,
+      DEVICE_STATE_ACTIVE,  // only active endpoints are OK
+      &pCollection);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    SAFE_RELEASE(pCollection);
+    return -1;
+  }
 
-    hr = pCollection->Item(
-                        index,
-                        ppDevice);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pCollection);
-        return -1;
-    }
+  hr = pCollection->Item(index, ppDevice);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    SAFE_RELEASE(pCollection);
+    return -1;
+  }
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  _EnumerateEndpointDevicesAll
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll(EDataFlow dataFlow) const
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll(
+    EDataFlow dataFlow) const {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    assert(_ptrEnumerator != NULL);
+  assert(_ptrEnumerator != NULL);
 
-    HRESULT hr = S_OK;
-    IMMDeviceCollection *pCollection = NULL;
-    IMMDevice *pEndpoint = NULL;
-    IPropertyStore *pProps = NULL;
-    IAudioEndpointVolume* pEndpointVolume = NULL;
-    LPWSTR pwszID = NULL;
+  HRESULT hr = S_OK;
+  IMMDeviceCollection* pCollection = NULL;
+  IMMDevice* pEndpoint = NULL;
+  IPropertyStore* pProps = NULL;
+  IAudioEndpointVolume* pEndpointVolume = NULL;
+  LPWSTR pwszID = NULL;
 
-    // Generate a collection of audio endpoint devices in the system.
-    // Get states for *all* endpoint devices.
-    // Output: IMMDeviceCollection interface.
-    hr = _ptrEnumerator->EnumAudioEndpoints(
-                                 dataFlow,            // data-flow direction (input parameter)
-                                 DEVICE_STATE_ACTIVE | DEVICE_STATE_DISABLED | DEVICE_STATE_UNPLUGGED,
-                                 &pCollection);        // release interface when done
+  // Generate a collection of audio endpoint devices in the system.
+  // Get states for *all* endpoint devices.
+  // Output: IMMDeviceCollection interface.
+  hr = _ptrEnumerator->EnumAudioEndpoints(
+      dataFlow,  // data-flow direction (input parameter)
+      DEVICE_STATE_ACTIVE | DEVICE_STATE_DISABLED | DEVICE_STATE_UNPLUGGED,
+      &pCollection);  // release interface when done
 
-    EXIT_ON_ERROR(hr);
+  EXIT_ON_ERROR(hr);
 
-    // use the IMMDeviceCollection interface...
+  // use the IMMDeviceCollection interface...
 
-    UINT count = 0;
+  UINT count = 0;
 
-    // Retrieve a count of the devices in the device collection.
-    hr = pCollection->GetCount(&count);
-    EXIT_ON_ERROR(hr);
-    if (dataFlow == eRender)
-        LOG(LS_VERBOSE) << "#rendering endpoint devices (counting all): "
-                        << count;
-    else if (dataFlow == eCapture)
-        LOG(LS_VERBOSE) << "#capturing endpoint devices (counting all): "
-                        << count;
+  // Retrieve a count of the devices in the device collection.
+  hr = pCollection->GetCount(&count);
+  EXIT_ON_ERROR(hr);
+  if (dataFlow == eRender)
+    LOG(LS_VERBOSE) << "#rendering endpoint devices (counting all): " << count;
+  else if (dataFlow == eCapture)
+    LOG(LS_VERBOSE) << "#capturing endpoint devices (counting all): " << count;
 
-    if (count == 0)
-    {
-        return 0;
-    }
-
-    // Each loop prints the name of an endpoint device.
-    for (ULONG i = 0; i < count; i++)
-    {
-        LOG(LS_VERBOSE) << "Endpoint " << i << ":";
-
-        // Get pointer to endpoint number i.
-        // Output: IMMDevice interface.
-        hr = pCollection->Item(
-                            i,
-                            &pEndpoint);
-        CONTINUE_ON_ERROR(hr);
-
-        // use the IMMDevice interface of the specified endpoint device...
-
-        // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
-        hr = pEndpoint->GetId(&pwszID);
-        CONTINUE_ON_ERROR(hr);
-        LOG(LS_VERBOSE) << "ID string    : " << pwszID;
-
-        // Retrieve an interface to the device's property store.
-        // Output: IPropertyStore interface.
-        hr = pEndpoint->OpenPropertyStore(
-                          STGM_READ,
-                          &pProps);
-        CONTINUE_ON_ERROR(hr);
-
-        // use the IPropertyStore interface...
-
-        PROPVARIANT varName;
-        // Initialize container for property value.
-        PropVariantInit(&varName);
-
-        // Get the endpoint's friendly-name property.
-        // Example: "Speakers (Realtek High Definition Audio)"
-        hr = pProps->GetValue(
-                       PKEY_Device_FriendlyName,
-                       &varName);
-        CONTINUE_ON_ERROR(hr);
-        LOG(LS_VERBOSE) << "friendly name: \"" << varName.pwszVal << "\"";
-
-        // Get the endpoint's current device state
-        DWORD dwState;
-        hr = pEndpoint->GetState(&dwState);
-        CONTINUE_ON_ERROR(hr);
-        if (dwState & DEVICE_STATE_ACTIVE)
-            LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
-                            << ")  : *ACTIVE*";
-        if (dwState & DEVICE_STATE_DISABLED)
-            LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
-                            << ")  : DISABLED";
-        if (dwState & DEVICE_STATE_NOTPRESENT)
-            LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
-                            << ")  : NOTPRESENT";
-        if (dwState & DEVICE_STATE_UNPLUGGED)
-            LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
-                            << ")  : UNPLUGGED";
-
-        // Check the hardware volume capabilities.
-        DWORD dwHwSupportMask = 0;
-        hr = pEndpoint->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
-                               NULL, (void**)&pEndpointVolume);
-        CONTINUE_ON_ERROR(hr);
-        hr = pEndpointVolume->QueryHardwareSupport(&dwHwSupportMask);
-        CONTINUE_ON_ERROR(hr);
-        if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
-            // The audio endpoint device supports a hardware volume control
-            LOG(LS_VERBOSE) << "hwmask (0x" << std::hex << dwHwSupportMask
-                            << std::dec << ") : HARDWARE_SUPPORT_VOLUME";
-        if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_MUTE)
-            // The audio endpoint device supports a hardware mute control
-            LOG(LS_VERBOSE) << "hwmask (0x" << std::hex << dwHwSupportMask
-                            << std::dec << ") : HARDWARE_SUPPORT_MUTE";
-        if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_METER)
-            // The audio endpoint device supports a hardware peak meter
-            LOG(LS_VERBOSE) << "hwmask (0x" << std::hex << dwHwSupportMask
-                            << std::dec << ") : HARDWARE_SUPPORT_METER";
-
-        // Check the channel count (#channels in the audio stream that enters or leaves the audio endpoint device)
-        UINT nChannelCount(0);
-        hr = pEndpointVolume->GetChannelCount(
-                                &nChannelCount);
-        CONTINUE_ON_ERROR(hr);
-        LOG(LS_VERBOSE) << "#channels    : " << nChannelCount;
-
-        if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
-        {
-            // Get the volume range.
-            float fLevelMinDB(0.0);
-            float fLevelMaxDB(0.0);
-            float fVolumeIncrementDB(0.0);
-            hr = pEndpointVolume->GetVolumeRange(
-                                    &fLevelMinDB,
-                                    &fLevelMaxDB,
-                                    &fVolumeIncrementDB);
-            CONTINUE_ON_ERROR(hr);
-            LOG(LS_VERBOSE) << "volume range : " << fLevelMinDB << " (min), "
-                            << fLevelMaxDB << " (max), " << fVolumeIncrementDB
-                            << " (inc) [dB]";
-
-            // The volume range from vmin = fLevelMinDB to vmax = fLevelMaxDB is divided
-            // into n uniform intervals of size vinc = fVolumeIncrementDB, where
-            // n = (vmax - vmin) / vinc.
-            // The values vmin, vmax, and vinc are measured in decibels. The client can set
-            // the volume level to one of n + 1 discrete values in the range from vmin to vmax.
-            int n = (int)((fLevelMaxDB-fLevelMinDB)/fVolumeIncrementDB);
-            LOG(LS_VERBOSE) << "#intervals   : " << n;
-
-            // Get information about the current step in the volume range.
-            // This method represents the volume level of the audio stream that enters or leaves
-            // the audio endpoint device as an index or "step" in a range of discrete volume levels.
-            // Output value nStepCount is the number of steps in the range. Output value nStep
-            // is the step index of the current volume level. If the number of steps is n = nStepCount,
-            // then step index nStep can assume values from 0 (minimum volume) to n - 1 (maximum volume).
-            UINT nStep(0);
-            UINT nStepCount(0);
-            hr = pEndpointVolume->GetVolumeStepInfo(
-                                    &nStep,
-                                    &nStepCount);
-            CONTINUE_ON_ERROR(hr);
-            LOG(LS_VERBOSE) << "volume steps : " << nStep << " (nStep), "
-                            << nStepCount << " (nStepCount)";
-        }
-Next:
-        if (FAILED(hr)) {
-          LOG(LS_VERBOSE) << "Error when logging device information";
-        }
-        CoTaskMemFree(pwszID);
-        pwszID = NULL;
-        PropVariantClear(&varName);
-        SAFE_RELEASE(pProps);
-        SAFE_RELEASE(pEndpoint);
-        SAFE_RELEASE(pEndpointVolume);
-    }
-    SAFE_RELEASE(pCollection);
+  if (count == 0) {
     return 0;
+  }
 
-Exit:
-    _TraceCOMError(hr);
+  // Each loop prints the name of an endpoint device.
+  for (ULONG i = 0; i < count; i++) {
+    LOG(LS_VERBOSE) << "Endpoint " << i << ":";
+
+    // Get pointer to endpoint number i.
+    // Output: IMMDevice interface.
+    hr = pCollection->Item(i, &pEndpoint);
+    CONTINUE_ON_ERROR(hr);
+
+    // use the IMMDevice interface of the specified endpoint device...
+
+    // Get the endpoint ID string (uniquely identifies the device among all
+    // audio endpoint devices)
+    hr = pEndpoint->GetId(&pwszID);
+    CONTINUE_ON_ERROR(hr);
+    LOG(LS_VERBOSE) << "ID string    : " << pwszID;
+
+    // Retrieve an interface to the device's property store.
+    // Output: IPropertyStore interface.
+    hr = pEndpoint->OpenPropertyStore(STGM_READ, &pProps);
+    CONTINUE_ON_ERROR(hr);
+
+    // use the IPropertyStore interface...
+
+    PROPVARIANT varName;
+    // Initialize container for property value.
+    PropVariantInit(&varName);
+
+    // Get the endpoint's friendly-name property.
+    // Example: "Speakers (Realtek High Definition Audio)"
+    hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
+    CONTINUE_ON_ERROR(hr);
+    LOG(LS_VERBOSE) << "friendly name: \"" << varName.pwszVal << "\"";
+
+    // Get the endpoint's current device state
+    DWORD dwState;
+    hr = pEndpoint->GetState(&dwState);
+    CONTINUE_ON_ERROR(hr);
+    if (dwState & DEVICE_STATE_ACTIVE)
+      LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
+                      << ")  : *ACTIVE*";
+    if (dwState & DEVICE_STATE_DISABLED)
+      LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
+                      << ")  : DISABLED";
+    if (dwState & DEVICE_STATE_NOTPRESENT)
+      LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
+                      << ")  : NOTPRESENT";
+    if (dwState & DEVICE_STATE_UNPLUGGED)
+      LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
+                      << ")  : UNPLUGGED";
+
+    // Check the hardware volume capabilities.
+    DWORD dwHwSupportMask = 0;
+    hr = pEndpoint->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                             (void**)&pEndpointVolume);
+    CONTINUE_ON_ERROR(hr);
+    hr = pEndpointVolume->QueryHardwareSupport(&dwHwSupportMask);
+    CONTINUE_ON_ERROR(hr);
+    if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
+      // The audio endpoint device supports a hardware volume control
+      LOG(LS_VERBOSE) << "hwmask (0x" << std::hex << dwHwSupportMask << std::dec
+                      << ") : HARDWARE_SUPPORT_VOLUME";
+    if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_MUTE)
+      // The audio endpoint device supports a hardware mute control
+      LOG(LS_VERBOSE) << "hwmask (0x" << std::hex << dwHwSupportMask << std::dec
+                      << ") : HARDWARE_SUPPORT_MUTE";
+    if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_METER)
+      // The audio endpoint device supports a hardware peak meter
+      LOG(LS_VERBOSE) << "hwmask (0x" << std::hex << dwHwSupportMask << std::dec
+                      << ") : HARDWARE_SUPPORT_METER";
+
+    // Check the channel count (#channels in the audio stream that enters or
+    // leaves the audio endpoint device)
+    UINT nChannelCount(0);
+    hr = pEndpointVolume->GetChannelCount(&nChannelCount);
+    CONTINUE_ON_ERROR(hr);
+    LOG(LS_VERBOSE) << "#channels    : " << nChannelCount;
+
+    if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME) {
+      // Get the volume range.
+      float fLevelMinDB(0.0);
+      float fLevelMaxDB(0.0);
+      float fVolumeIncrementDB(0.0);
+      hr = pEndpointVolume->GetVolumeRange(&fLevelMinDB, &fLevelMaxDB,
+                                           &fVolumeIncrementDB);
+      CONTINUE_ON_ERROR(hr);
+      LOG(LS_VERBOSE) << "volume range : " << fLevelMinDB << " (min), "
+                      << fLevelMaxDB << " (max), " << fVolumeIncrementDB
+                      << " (inc) [dB]";
+
+      // The volume range from vmin = fLevelMinDB to vmax = fLevelMaxDB is
+      // divided into n uniform intervals of size vinc = fVolumeIncrementDB,
+      // where n = (vmax - vmin) / vinc. The values vmin, vmax, and vinc are
+      // measured in decibels. The client can set the volume level to one of n +
+      // 1 discrete values in the range from vmin to vmax.
+      int n = (int)((fLevelMaxDB - fLevelMinDB) / fVolumeIncrementDB);
+      LOG(LS_VERBOSE) << "#intervals   : " << n;
+
+      // Get information about the current step in the volume range.
+      // This method represents the volume level of the audio stream that enters
+      // or leaves the audio endpoint device as an index or "step" in a range of
+      // discrete volume levels. Output value nStepCount is the number of steps
+      // in the range. Output value nStep is the step index of the current
+      // volume level. If the number of steps is n = nStepCount, then step index
+      // nStep can assume values from 0 (minimum volume) to n - 1 (maximum
+      // volume).
+      UINT nStep(0);
+      UINT nStepCount(0);
+      hr = pEndpointVolume->GetVolumeStepInfo(&nStep, &nStepCount);
+      CONTINUE_ON_ERROR(hr);
+      LOG(LS_VERBOSE) << "volume steps : " << nStep << " (nStep), "
+                      << nStepCount << " (nStepCount)";
+    }
+  Next:
+    if (FAILED(hr)) {
+      LOG(LS_VERBOSE) << "Error when logging device information";
+    }
     CoTaskMemFree(pwszID);
     pwszID = NULL;
-    SAFE_RELEASE(pCollection);
+    PropVariantClear(&varName);
+    SAFE_RELEASE(pProps);
     SAFE_RELEASE(pEndpoint);
     SAFE_RELEASE(pEndpointVolume);
-    SAFE_RELEASE(pProps);
-    return -1;
+  }
+  SAFE_RELEASE(pCollection);
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  CoTaskMemFree(pwszID);
+  pwszID = NULL;
+  SAFE_RELEASE(pCollection);
+  SAFE_RELEASE(pEndpoint);
+  SAFE_RELEASE(pEndpointVolume);
+  SAFE_RELEASE(pProps);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  _TraceCOMError
 // ----------------------------------------------------------------------------
 
-void AudioDeviceWindowsCore::_TraceCOMError(HRESULT hr) const
-{
-    TCHAR buf[MAXERRORLENGTH];
-    TCHAR errorText[MAXERRORLENGTH];
+void AudioDeviceWindowsCore::_TraceCOMError(HRESULT hr) const {
+  TCHAR buf[MAXERRORLENGTH];
+  TCHAR errorText[MAXERRORLENGTH];
 
-    const DWORD dwFlags = FORMAT_MESSAGE_FROM_SYSTEM |
-                          FORMAT_MESSAGE_IGNORE_INSERTS;
-    const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
+  const DWORD dwFlags =
+      FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+  const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
 
-    // Gets the system's human-readable message string for this HRESULT.
-    // All error messages are in English by default.
-    DWORD messageLength = ::FormatMessageW(dwFlags,
-                                           0,
-                                           hr,
-                                           dwLangID,
-                                           errorText,
-                                           MAXERRORLENGTH,
-                                           NULL);
+  // Gets the system's human-readable message string for this HRESULT.
+  // All error messages are in English by default.
+  DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText,
+                                         MAXERRORLENGTH, NULL);
 
-    assert(messageLength <= MAXERRORLENGTH);
+  assert(messageLength <= MAXERRORLENGTH);
 
-    // Trims trailing white space (FormatMessage() leaves a trailing CR-LF).
-    for (; messageLength && ::isspace(errorText[messageLength - 1]);
-         --messageLength)
-    {
-        errorText[messageLength - 1] = '\0';
-    }
+  // Trims trailing white space (FormatMessage() leaves a trailing CR-LF).
+  for (; messageLength && ::isspace(errorText[messageLength - 1]);
+       --messageLength) {
+    errorText[messageLength - 1] = '\0';
+  }
 
-    LOG(LS_ERROR) << "Core Audio method failed (hr=" << hr << ")";
-    StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
-    StringCchCat(buf, MAXERRORLENGTH, errorText);
-    LOG(LS_ERROR) << WideToUTF8(buf);
+  LOG(LS_ERROR) << "Core Audio method failed (hr=" << hr << ")";
+  StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
+  StringCchCat(buf, MAXERRORLENGTH, errorText);
+  LOG(LS_ERROR) << WideToUTF8(buf);
 }
 
 // ----------------------------------------------------------------------------
@@ -4838,29 +4310,27 @@
 
 char* AudioDeviceWindowsCore::WideToUTF8(const TCHAR* src) const {
 #ifdef UNICODE
-    const size_t kStrLen = sizeof(_str);
-    memset(_str, 0, kStrLen);
-    // Get required size (in bytes) to be able to complete the conversion.
-    unsigned int required_size = (unsigned int)WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, 0, 0, 0);
-    if (required_size <= kStrLen)
-    {
-        // Process the entire input string, including the terminating null char.
-        if (WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, kStrLen, 0, 0) == 0)
-            memset(_str, 0, kStrLen);
-    }
-    return _str;
+  const size_t kStrLen = sizeof(_str);
+  memset(_str, 0, kStrLen);
+  // Get required size (in bytes) to be able to complete the conversion.
+  unsigned int required_size =
+      (unsigned int)WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, 0, 0, 0);
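+  // Note: since the source length is passed as -1, the conversion covers the
+  // terminating null character and required_size includes it, so comparing
+  // against sizeof(_str) below is safe.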
+  if (required_size <= kStrLen) {
+    // Process the entire input string, including the terminating null char.
+    if (WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, kStrLen, 0, 0) == 0)
+      memset(_str, 0, kStrLen);
+  }
+  return _str;
 #else
-    return const_cast<char*>(src);
+  return const_cast<char*>(src);
 #endif
 }
 
-
-bool AudioDeviceWindowsCore::KeyPressed() const{
-
+bool AudioDeviceWindowsCore::KeyPressed() const {
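+  // Scan the virtual-key range [VK_SPACE, VK_NUMLOCK) and OR together the
+  // least significant bit of each GetAsyncKeyState() result, which is set
+  // when the key was pressed since the previous query.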
   int key_down = 0;
   for (int key = VK_SPACE; key < VK_NUMLOCK; key++) {
     short res = GetAsyncKeyState(key);
-    key_down |= res & 0x1; // Get the LSB
+    key_down |= res & 0x1;  // Get the LSB
   }
   return (key_down > 0);
 }