Formatting some files that use LOG macros.

In order to create a clean CL for switching to RTC_-prefixed LOG macros,
this CL runs `git cl format --full` on the files that use LOG macros in
the following directories:
- modules/audio_device
- modules/media_file
- modules/video_capture

This CL has been automatically generated with:

for m in PLOG \
  LOG_TAG \
  LOG_GLEM \
  LOG_GLE_EX \
  LOG_GLE \
  LAST_SYSTEM_ERROR \
  LOG_ERRNO_EX \
  LOG_ERRNO \
  LOG_ERR_EX \
  LOG_ERR \
  LOG_V \
  LOG_F \
  LOG_T_F \
  LOG_E \
  LOG_T \
  LOG_CHECK_LEVEL_V \
  LOG_CHECK_LEVEL \
  LOG
do
  for d in media_file video_capture audio_device; do
    cd modules/$d
    git grep -l $m | grep -E "\.(cc|h|m|mm)$" | xargs sed -i "1 s/$/ /"
    cd ../..
  done
done
git cl format --full

Bug: webrtc:8452
Change-Id: I2858b6928e6bd79957f2e5e0b07028eb68a304b2
Reviewed-on: https://webrtc-review.googlesource.com/21322
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Reviewed-by: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20613}
diff --git a/modules/audio_device/android/audio_device_template.h b/modules/audio_device/android/audio_device_template.h
index 5be3a9e..04ff1bc 100644
--- a/modules/audio_device/android/audio_device_template.h
+++ b/modules/audio_device/android/audio_device_template.h
@@ -100,18 +100,16 @@
     return 1;
   }
 
-  int32_t PlayoutDeviceName(
-      uint16_t index,
-      char name[kAdmMaxDeviceNameSize],
-      char guid[kAdmMaxGuidSize]) override {
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override {
     FATAL() << "Should never be called";
     return -1;
   }
 
-  int32_t RecordingDeviceName(
-      uint16_t index,
-      char name[kAdmMaxDeviceNameSize],
-      char guid[kAdmMaxGuidSize]) override {
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override {
     FATAL() << "Should never be called";
     return -1;
   }
@@ -215,9 +213,7 @@
     return err;
   }
 
-  bool Recording() const override {
-    return input_.Recording() ;
-  }
+  bool Recording() const override { return input_.Recording(); }
 
   int32_t SetAGC(bool enable) override {
     if (enable) {
@@ -276,7 +272,7 @@
     return output_.MinSpeakerVolume(minVolume);
   }
 
-  int32_t MicrophoneVolumeIsAvailable(bool& available) override{
+  int32_t MicrophoneVolumeIsAvailable(bool& available) override {
     available = false;
     return -1;
   }
diff --git a/modules/audio_device/android/audio_manager.cc b/modules/audio_device/android/audio_manager.cc
index 9e38a85..6f385a3 100644
--- a/modules/audio_device/android/audio_manager.cc
+++ b/modules/audio_device/android/audio_manager.cc
@@ -107,9 +107,9 @@
   // that the user explicitly selects the high-latency audio path, hence we use
   // the selected |audio_layer| here to set the delay estimate.
   delay_estimate_in_milliseconds_ =
-      (audio_layer == AudioDeviceModule::kAndroidJavaAudio) ?
-      kHighLatencyModeDelayEstimateInMilliseconds :
-      kLowLatencyModeDelayEstimateInMilliseconds;
+      (audio_layer == AudioDeviceModule::kAndroidJavaAudio)
+          ? kHighLatencyModeDelayEstimateInMilliseconds
+          : kLowLatencyModeDelayEstimateInMilliseconds;
   ALOGD("delay_estimate_in_milliseconds: %d", delay_estimate_in_milliseconds_);
 }
 
@@ -201,8 +201,9 @@
   ALOGD("IsLowLatencyPlayoutSupported()");
   // Some devices are blacklisted for usage of OpenSL ES even if they report
   // that low-latency playout is supported. See b/21485703 for details.
-  return j_audio_manager_->IsDeviceBlacklistedForOpenSLESUsage() ?
-      false : low_latency_playout_;
+  return j_audio_manager_->IsDeviceBlacklistedForOpenSLESUsage()
+             ? false
+             : low_latency_playout_;
 }
 
 bool AudioManager::IsLowLatencyRecordSupported() const {
diff --git a/modules/audio_device/android/audio_record_jni.cc b/modules/audio_device/android/audio_record_jni.cc
index b437644..79f8c8b 100644
--- a/modules/audio_device/android/audio_record_jni.cc
+++ b/modules/audio_device/android/audio_record_jni.cc
@@ -41,8 +41,8 @@
 
 AudioRecordJni::JavaAudioRecord::~JavaAudioRecord() {}
 
-int AudioRecordJni::JavaAudioRecord::InitRecording(
-    int sample_rate, size_t channels) {
+int AudioRecordJni::JavaAudioRecord::InitRecording(int sample_rate,
+                                                   size_t channels) {
   return audio_record_->CallIntMethod(init_recording_,
                                       static_cast<jint>(sample_rate),
                                       static_cast<jint>(channels));
@@ -83,10 +83,10 @@
   RTC_CHECK(j_environment_);
   JNINativeMethod native_methods[] = {
       {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
-      reinterpret_cast<void*>(
-          &webrtc::AudioRecordJni::CacheDirectBufferAddress)},
+       reinterpret_cast<void*>(
+           &webrtc::AudioRecordJni::CacheDirectBufferAddress)},
       {"nativeDataIsRecorded", "(IJ)V",
-      reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
+       reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
   j_native_registration_ = j_environment_->RegisterNatives(
       "org/webrtc/voiceengine/WebRtcAudioRecord", native_methods,
       arraysize(native_methods));
@@ -168,7 +168,7 @@
   thread_checker_java_.DetachFromThread();
   initialized_ = false;
   recording_ = false;
-  direct_buffer_address_= nullptr;
+  direct_buffer_address_ = nullptr;
   return 0;
 }
 
@@ -206,29 +206,32 @@
   return j_audio_record_->EnableBuiltInNS(enable) ? 0 : -1;
 }
 
-void JNICALL AudioRecordJni::CacheDirectBufferAddress(
-    JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioRecord) {
+void JNICALL AudioRecordJni::CacheDirectBufferAddress(JNIEnv* env,
+                                                      jobject obj,
+                                                      jobject byte_buffer,
+                                                      jlong nativeAudioRecord) {
   webrtc::AudioRecordJni* this_object =
-      reinterpret_cast<webrtc::AudioRecordJni*> (nativeAudioRecord);
+      reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
   this_object->OnCacheDirectBufferAddress(env, byte_buffer);
 }
 
-void AudioRecordJni::OnCacheDirectBufferAddress(
-    JNIEnv* env, jobject byte_buffer) {
+void AudioRecordJni::OnCacheDirectBufferAddress(JNIEnv* env,
+                                                jobject byte_buffer) {
   ALOGD("OnCacheDirectBufferAddress");
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   RTC_DCHECK(!direct_buffer_address_);
-  direct_buffer_address_ =
-      env->GetDirectBufferAddress(byte_buffer);
+  direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
   jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
   ALOGD("direct buffer capacity: %lld", capacity);
   direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
 }
 
-void JNICALL AudioRecordJni::DataIsRecorded(
-  JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord) {
+void JNICALL AudioRecordJni::DataIsRecorded(JNIEnv* env,
+                                            jobject obj,
+                                            jint length,
+                                            jlong nativeAudioRecord) {
   webrtc::AudioRecordJni* this_object =
-      reinterpret_cast<webrtc::AudioRecordJni*> (nativeAudioRecord);
+      reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
   this_object->OnDataIsRecorded(length);
 }
 
diff --git a/modules/audio_device/android/audio_track_jni.cc b/modules/audio_device/android/audio_track_jni.cc
index 45e59c4..89d4af0 100644
--- a/modules/audio_device/android/audio_track_jni.cc
+++ b/modules/audio_device/android/audio_track_jni.cc
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "modules/audio_device/android/audio_manager.h"
 #include "modules/audio_device/android/audio_track_jni.h"
+#include "modules/audio_device/android/audio_manager.h"
 
 #include <utility>
 
@@ -82,10 +82,10 @@
   RTC_CHECK(j_environment_);
   JNINativeMethod native_methods[] = {
       {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
-      reinterpret_cast<void*>(
-          &webrtc::AudioTrackJni::CacheDirectBufferAddress)},
+       reinterpret_cast<void*>(
+           &webrtc::AudioTrackJni::CacheDirectBufferAddress)},
       {"nativeGetPlayoutData", "(IJ)V",
-      reinterpret_cast<void*>(&webrtc::AudioTrackJni::GetPlayoutData)}};
+       reinterpret_cast<void*>(&webrtc::AudioTrackJni::GetPlayoutData)}};
   j_native_registration_ = j_environment_->RegisterNatives(
       "org/webrtc/voiceengine/WebRtcAudioTrack", native_methods,
       arraysize(native_methods));
@@ -122,8 +122,8 @@
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   RTC_DCHECK(!initialized_);
   RTC_DCHECK(!playing_);
-  if (!j_audio_track_->InitPlayout(
-      audio_parameters_.sample_rate(), audio_parameters_.channels())) {
+  if (!j_audio_track_->InitPlayout(audio_parameters_.sample_rate(),
+                                   audio_parameters_.channels())) {
     ALOGE("InitPlayout failed!");
     return -1;
   }
@@ -209,20 +209,21 @@
   audio_device_buffer_->SetPlayoutChannels(channels);
 }
 
-void JNICALL AudioTrackJni::CacheDirectBufferAddress(
-    JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioTrack) {
+void JNICALL AudioTrackJni::CacheDirectBufferAddress(JNIEnv* env,
+                                                     jobject obj,
+                                                     jobject byte_buffer,
+                                                     jlong nativeAudioTrack) {
   webrtc::AudioTrackJni* this_object =
-      reinterpret_cast<webrtc::AudioTrackJni*> (nativeAudioTrack);
+      reinterpret_cast<webrtc::AudioTrackJni*>(nativeAudioTrack);
   this_object->OnCacheDirectBufferAddress(env, byte_buffer);
 }
 
-void AudioTrackJni::OnCacheDirectBufferAddress(
-    JNIEnv* env, jobject byte_buffer) {
+void AudioTrackJni::OnCacheDirectBufferAddress(JNIEnv* env,
+                                               jobject byte_buffer) {
   ALOGD("OnCacheDirectBufferAddress");
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   RTC_DCHECK(!direct_buffer_address_);
-  direct_buffer_address_ =
-      env->GetDirectBufferAddress(byte_buffer);
+  direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
   jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
   ALOGD("direct buffer capacity: %lld", capacity);
   direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
@@ -231,10 +232,12 @@
   ALOGD("frames_per_buffer: %" PRIuS, frames_per_buffer_);
 }
 
-void JNICALL AudioTrackJni::GetPlayoutData(
-  JNIEnv* env, jobject obj, jint length, jlong nativeAudioTrack) {
+void JNICALL AudioTrackJni::GetPlayoutData(JNIEnv* env,
+                                           jobject obj,
+                                           jint length,
+                                           jlong nativeAudioTrack) {
   webrtc::AudioTrackJni* this_object =
-      reinterpret_cast<webrtc::AudioTrackJni*> (nativeAudioTrack);
+      reinterpret_cast<webrtc::AudioTrackJni*>(nativeAudioTrack);
   this_object->OnGetPlayoutData(static_cast<size_t>(length));
 }
 
diff --git a/modules/audio_device/android/opensles_player.cc b/modules/audio_device/android/opensles_player.cc
index 1530741..7ac6912 100644
--- a/modules/audio_device/android/opensles_player.cc
+++ b/modules/audio_device/android/opensles_player.cc
@@ -289,10 +289,10 @@
   SLDataSink audio_sink = {&locator_output_mix, nullptr};
 
   // Define interfaces that we indend to use and realize.
-  const SLInterfaceID interface_ids[] = {
-      SL_IID_ANDROIDCONFIGURATION, SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
-  const SLboolean interface_required[] = {
-      SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
+  const SLInterfaceID interface_ids[] = {SL_IID_ANDROIDCONFIGURATION,
+                                         SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
+  const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
+                                          SL_BOOLEAN_TRUE};
 
   // Create the audio player on the engine interface.
   RETURN_ON_ERROR(
diff --git a/modules/audio_device/audio_device_buffer.h b/modules/audio_device/audio_device_buffer.h
index a68bbdf..8b8e907 100644
--- a/modules/audio_device/audio_device_buffer.h
+++ b/modules/audio_device/audio_device_buffer.h
@@ -197,7 +197,7 @@
   // dynamically.
   rtc::BufferT<int16_t> rec_buffer_ RTC_ACCESS_ON(recording_thread_checker_);
 
-  // AGC parameters.
+// AGC parameters.
 #if !defined(WEBRTC_WIN)
   uint32_t current_mic_level_ RTC_ACCESS_ON(recording_thread_checker_);
 #else
diff --git a/modules/audio_device/audio_device_impl.cc b/modules/audio_device/audio_device_impl.cc
index 12caa2e..0a669ca 100644
--- a/modules/audio_device/audio_device_impl.cc
+++ b/modules/audio_device/audio_device_impl.cc
@@ -386,7 +386,7 @@
 int32_t AudioDeviceModuleImpl::SpeakerMuteIsAvailable(bool* available) {
   LOG(INFO) << __FUNCTION__;
   CHECKinitialized_();
-  bool isAvailable  = false;
+  bool isAvailable = false;
   if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) {
     return -1;
   }
diff --git a/modules/audio_device/dummy/file_audio_device.cc b/modules/audio_device/dummy/file_audio_device.cc
index 6b0ee04..6954762 100644
--- a/modules/audio_device/dummy/file_audio_device.cc
+++ b/modules/audio_device/dummy/file_audio_device.cc
@@ -26,24 +26,23 @@
     kRecordingFixedSampleRate / 100 * kRecordingNumChannels * 2;
 
 FileAudioDevice::FileAudioDevice(const char* inputFilename,
-                                 const char* outputFilename):
-    _ptrAudioBuffer(NULL),
-    _recordingBuffer(NULL),
-    _playoutBuffer(NULL),
-    _recordingFramesLeft(0),
-    _playoutFramesLeft(0),
-    _recordingBufferSizeIn10MS(0),
-    _recordingFramesIn10MS(0),
-    _playoutFramesIn10MS(0),
-    _playing(false),
-    _recording(false),
-    _lastCallPlayoutMillis(0),
-    _lastCallRecordMillis(0),
-    _outputFile(*FileWrapper::Create()),
-    _inputFile(*FileWrapper::Create()),
-    _outputFilename(outputFilename),
-    _inputFilename(inputFilename) {
-}
+                                 const char* outputFilename)
+    : _ptrAudioBuffer(NULL),
+      _recordingBuffer(NULL),
+      _playoutBuffer(NULL),
+      _recordingFramesLeft(0),
+      _playoutFramesLeft(0),
+      _recordingBufferSizeIn10MS(0),
+      _recordingFramesIn10MS(0),
+      _playoutFramesIn10MS(0),
+      _playing(false),
+      _recording(false),
+      _lastCallPlayoutMillis(0),
+      _lastCallRecordMillis(0),
+      _outputFile(*FileWrapper::Create()),
+      _inputFile(*FileWrapper::Create()),
+      _outputFilename(outputFilename),
+      _inputFilename(inputFilename) {}
 
 FileAudioDevice::~FileAudioDevice() {
   delete &_outputFile;
@@ -59,9 +58,13 @@
   return InitStatus::OK;
 }
 
-int32_t FileAudioDevice::Terminate() { return 0; }
+int32_t FileAudioDevice::Terminate() {
+  return 0;
+}
 
-bool FileAudioDevice::Initialized() const { return true; }
+bool FileAudioDevice::Initialized() const {
+  return true;
+}
 
 int16_t FileAudioDevice::PlayoutDevices() {
   return 1;
@@ -72,8 +75,8 @@
 }
 
 int32_t FileAudioDevice::PlayoutDeviceName(uint16_t index,
-                                            char name[kAdmMaxDeviceNameSize],
-                                            char guid[kAdmMaxGuidSize]) {
+                                           char name[kAdmMaxDeviceNameSize],
+                                           char guid[kAdmMaxGuidSize]) {
   const char* kName = "dummy_device";
   const char* kGuid = "dummy_device_unique_id";
   if (index < 1) {
@@ -87,8 +90,8 @@
 }
 
 int32_t FileAudioDevice::RecordingDeviceName(uint16_t index,
-                                              char name[kAdmMaxDeviceNameSize],
-                                              char guid[kAdmMaxGuidSize]) {
+                                             char name[kAdmMaxDeviceNameSize],
+                                             char guid[kAdmMaxGuidSize]) {
   const char* kName = "dummy_device";
   const char* kGuid = "dummy_device_unique_id";
   if (index < 1) {
@@ -138,9 +141,9 @@
 
 int32_t FileAudioDevice::InitPlayout() {
   if (_ptrAudioBuffer) {
-      // Update webrtc audio buffer with the selected parameters
-      _ptrAudioBuffer->SetPlayoutSampleRate(kPlayoutFixedSampleRate);
-      _ptrAudioBuffer->SetPlayoutChannels(kPlayoutNumChannels);
+    // Update webrtc audio buffer with the selected parameters
+    _ptrAudioBuffer->SetPlayoutSampleRate(kPlayoutFixedSampleRate);
+    _ptrAudioBuffer->SetPlayoutChannels(kPlayoutNumChannels);
   }
   return 0;
 }
@@ -180,7 +183,7 @@
 
 int32_t FileAudioDevice::StartPlayout() {
   if (_playing) {
-      return 0;
+    return 0;
   }
 
   _playoutFramesIn10MS = static_cast<size_t>(kPlayoutFixedSampleRate / 100);
@@ -188,7 +191,7 @@
   _playoutFramesLeft = 0;
 
   if (!_playoutBuffer) {
-      _playoutBuffer = new int8_t[kPlayoutBufferSize];
+    _playoutBuffer = new int8_t[kPlayoutBufferSize];
   }
   if (!_playoutBuffer) {
     _playing = false;
@@ -200,7 +203,7 @@
       !_outputFile.OpenFile(_outputFilename.c_str(), false)) {
     LOG(LS_ERROR) << "Failed to open playout file: " << _outputFilename;
     _playing = false;
-    delete [] _playoutBuffer;
+    delete[] _playoutBuffer;
     _playoutBuffer = NULL;
     return -1;
   }
@@ -210,32 +213,30 @@
   _ptrThreadPlay->Start();
   _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);
 
-  LOG(LS_INFO) << "Started playout capture to output file: "
-               << _outputFilename;
+  LOG(LS_INFO) << "Started playout capture to output file: " << _outputFilename;
   return 0;
 }
 
 int32_t FileAudioDevice::StopPlayout() {
   {
-      rtc::CritScope lock(&_critSect);
-      _playing = false;
+    rtc::CritScope lock(&_critSect);
+    _playing = false;
   }
 
   // stop playout thread first
   if (_ptrThreadPlay) {
-      _ptrThreadPlay->Stop();
-      _ptrThreadPlay.reset();
+    _ptrThreadPlay->Stop();
+    _ptrThreadPlay.reset();
   }
 
   rtc::CritScope lock(&_critSect);
 
   _playoutFramesLeft = 0;
-  delete [] _playoutBuffer;
+  delete[] _playoutBuffer;
   _playoutBuffer = NULL;
   _outputFile.CloseFile();
 
-  LOG(LS_INFO) << "Stopped playout capture to output file: "
-               << _outputFilename;
+  LOG(LS_INFO) << "Stopped playout capture to output file: " << _outputFilename;
   return 0;
 }
 
@@ -247,11 +248,10 @@
   _recording = true;
 
   // Make sure we only create the buffer once.
-  _recordingBufferSizeIn10MS = _recordingFramesIn10MS *
-                               kRecordingNumChannels *
-                               2;
+  _recordingBufferSizeIn10MS =
+      _recordingFramesIn10MS * kRecordingNumChannels * 2;
   if (!_recordingBuffer) {
-      _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
+    _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
   }
 
   if (!_inputFilename.empty() &&
@@ -269,13 +269,11 @@
   _ptrThreadRec->Start();
   _ptrThreadRec->SetPriority(rtc::kRealtimePriority);
 
-  LOG(LS_INFO) << "Started recording from input file: "
-               << _inputFilename;
+  LOG(LS_INFO) << "Started recording from input file: " << _inputFilename;
 
   return 0;
 }
 
-
 int32_t FileAudioDevice::StopRecording() {
   {
     rtc::CritScope lock(&_critSect);
@@ -283,20 +281,19 @@
   }
 
   if (_ptrThreadRec) {
-      _ptrThreadRec->Stop();
-      _ptrThreadRec.reset();
+    _ptrThreadRec->Stop();
+    _ptrThreadRec.reset();
   }
 
   rtc::CritScope lock(&_critSect);
   _recordingFramesLeft = 0;
   if (_recordingBuffer) {
-      delete [] _recordingBuffer;
-      _recordingBuffer = NULL;
+    delete[] _recordingBuffer;
+    _recordingBuffer = NULL;
   }
   _inputFile.CloseFile();
 
-  LOG(LS_INFO) << "Stopped recording from input file: "
-               << _inputFilename;
+  LOG(LS_INFO) << "Stopped recording from input file: " << _inputFilename;
   return 0;
 }
 
@@ -304,25 +301,41 @@
   return _recording;
 }
 
-int32_t FileAudioDevice::SetAGC(bool enable) { return -1; }
+int32_t FileAudioDevice::SetAGC(bool enable) {
+  return -1;
+}
 
-bool FileAudioDevice::AGC() const { return false; }
+bool FileAudioDevice::AGC() const {
+  return false;
+}
 
-int32_t FileAudioDevice::InitSpeaker() { return -1; }
+int32_t FileAudioDevice::InitSpeaker() {
+  return -1;
+}
 
-bool FileAudioDevice::SpeakerIsInitialized() const { return false; }
+bool FileAudioDevice::SpeakerIsInitialized() const {
+  return false;
+}
 
-int32_t FileAudioDevice::InitMicrophone() { return 0; }
+int32_t FileAudioDevice::InitMicrophone() {
+  return 0;
+}
 
-bool FileAudioDevice::MicrophoneIsInitialized() const { return true; }
+bool FileAudioDevice::MicrophoneIsInitialized() const {
+  return true;
+}
 
 int32_t FileAudioDevice::SpeakerVolumeIsAvailable(bool& available) {
   return -1;
 }
 
-int32_t FileAudioDevice::SetSpeakerVolume(uint32_t volume) { return -1; }
+int32_t FileAudioDevice::SetSpeakerVolume(uint32_t volume) {
+  return -1;
+}
 
-int32_t FileAudioDevice::SpeakerVolume(uint32_t& volume) const { return -1; }
+int32_t FileAudioDevice::SpeakerVolume(uint32_t& volume) const {
+  return -1;
+}
 
 int32_t FileAudioDevice::MaxSpeakerVolume(uint32_t& maxVolume) const {
   return -1;
@@ -336,7 +349,9 @@
   return -1;
 }
 
-int32_t FileAudioDevice::SetMicrophoneVolume(uint32_t volume) { return -1; }
+int32_t FileAudioDevice::SetMicrophoneVolume(uint32_t volume) {
+  return -1;
+}
 
 int32_t FileAudioDevice::MicrophoneVolume(uint32_t& volume) const {
   return -1;
@@ -350,19 +365,29 @@
   return -1;
 }
 
-int32_t FileAudioDevice::SpeakerMuteIsAvailable(bool& available) { return -1; }
+int32_t FileAudioDevice::SpeakerMuteIsAvailable(bool& available) {
+  return -1;
+}
 
-int32_t FileAudioDevice::SetSpeakerMute(bool enable) { return -1; }
+int32_t FileAudioDevice::SetSpeakerMute(bool enable) {
+  return -1;
+}
 
-int32_t FileAudioDevice::SpeakerMute(bool& enabled) const { return -1; }
+int32_t FileAudioDevice::SpeakerMute(bool& enabled) const {
+  return -1;
+}
 
 int32_t FileAudioDevice::MicrophoneMuteIsAvailable(bool& available) {
   return -1;
 }
 
-int32_t FileAudioDevice::SetMicrophoneMute(bool enable) { return -1; }
+int32_t FileAudioDevice::SetMicrophoneMute(bool enable) {
+  return -1;
+}
 
-int32_t FileAudioDevice::MicrophoneMute(bool& enabled) const { return -1; }
+int32_t FileAudioDevice::MicrophoneMute(bool& enabled) const {
+  return -1;
+}
 
 int32_t FileAudioDevice::StereoPlayoutIsAvailable(bool& available) {
   available = true;
@@ -409,81 +434,76 @@
   _ptrAudioBuffer->SetPlayoutChannels(0);
 }
 
-bool FileAudioDevice::PlayThreadFunc(void* pThis)
-{
-    return (static_cast<FileAudioDevice*>(pThis)->PlayThreadProcess());
+bool FileAudioDevice::PlayThreadFunc(void* pThis) {
+  return (static_cast<FileAudioDevice*>(pThis)->PlayThreadProcess());
 }
 
-bool FileAudioDevice::RecThreadFunc(void* pThis)
-{
-    return (static_cast<FileAudioDevice*>(pThis)->RecThreadProcess());
+bool FileAudioDevice::RecThreadFunc(void* pThis) {
+  return (static_cast<FileAudioDevice*>(pThis)->RecThreadProcess());
 }
 
-bool FileAudioDevice::PlayThreadProcess()
-{
-    if (!_playing) {
-        return false;
-    }
-    int64_t currentTime = rtc::TimeMillis();
-    _critSect.Enter();
+bool FileAudioDevice::PlayThreadProcess() {
+  if (!_playing) {
+    return false;
+  }
+  int64_t currentTime = rtc::TimeMillis();
+  _critSect.Enter();
 
-    if (_lastCallPlayoutMillis == 0 ||
-        currentTime - _lastCallPlayoutMillis >= 10) {
-        _critSect.Leave();
-        _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
-        _critSect.Enter();
-
-        _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
-        RTC_DCHECK_EQ(_playoutFramesIn10MS, _playoutFramesLeft);
-        if (_outputFile.is_open()) {
-          _outputFile.Write(_playoutBuffer, kPlayoutBufferSize);
-        }
-        _lastCallPlayoutMillis = currentTime;
-    }
-    _playoutFramesLeft = 0;
+  if (_lastCallPlayoutMillis == 0 ||
+      currentTime - _lastCallPlayoutMillis >= 10) {
     _critSect.Leave();
-
-    int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
-    if (deltaTimeMillis < 10) {
-      SleepMs(10 - deltaTimeMillis);
-    }
-
-    return true;
-}
-
-bool FileAudioDevice::RecThreadProcess()
-{
-    if (!_recording) {
-        return false;
-    }
-
-    int64_t currentTime = rtc::TimeMillis();
+    _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
     _critSect.Enter();
 
-    if (_lastCallRecordMillis == 0 ||
-        currentTime - _lastCallRecordMillis >= 10) {
-      if (_inputFile.is_open()) {
-        if (_inputFile.Read(_recordingBuffer, kRecordingBufferSize) > 0) {
-          _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
-                                             _recordingFramesIn10MS);
-        } else {
-          _inputFile.Rewind();
-        }
-        _lastCallRecordMillis = currentTime;
-        _critSect.Leave();
-        _ptrAudioBuffer->DeliverRecordedData();
-        _critSect.Enter();
+    _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
+    RTC_DCHECK_EQ(_playoutFramesIn10MS, _playoutFramesLeft);
+    if (_outputFile.is_open()) {
+      _outputFile.Write(_playoutBuffer, kPlayoutBufferSize);
+    }
+    _lastCallPlayoutMillis = currentTime;
+  }
+  _playoutFramesLeft = 0;
+  _critSect.Leave();
+
+  int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
+  if (deltaTimeMillis < 10) {
+    SleepMs(10 - deltaTimeMillis);
+  }
+
+  return true;
+}
+
+bool FileAudioDevice::RecThreadProcess() {
+  if (!_recording) {
+    return false;
+  }
+
+  int64_t currentTime = rtc::TimeMillis();
+  _critSect.Enter();
+
+  if (_lastCallRecordMillis == 0 || currentTime - _lastCallRecordMillis >= 10) {
+    if (_inputFile.is_open()) {
+      if (_inputFile.Read(_recordingBuffer, kRecordingBufferSize) > 0) {
+        _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
+                                           _recordingFramesIn10MS);
+      } else {
+        _inputFile.Rewind();
       }
+      _lastCallRecordMillis = currentTime;
+      _critSect.Leave();
+      _ptrAudioBuffer->DeliverRecordedData();
+      _critSect.Enter();
     }
+  }
 
-    _critSect.Leave();
+  _critSect.Leave();
 
-    int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
-    if (deltaTimeMillis < 10) {
-      SleepMs(10 - deltaTimeMillis);
-    }
+  int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
+  if (deltaTimeMillis < 10) {
+    SleepMs(10 - deltaTimeMillis);
+  }
 
-    return true;
+  return true;
 }
 
 }  // namespace webrtc
diff --git a/modules/audio_device/dummy/file_audio_device_factory.cc b/modules/audio_device/dummy/file_audio_device_factory.cc
index 96e3eaf..1739953 100644
--- a/modules/audio_device/dummy/file_audio_device_factory.cc
+++ b/modules/audio_device/dummy/file_audio_device_factory.cc
@@ -36,7 +36,8 @@
 }
 
 void FileAudioDeviceFactory::SetFilenamesToUse(
-    const char* inputAudioFilename, const char* outputAudioFilename) {
+    const char* inputAudioFilename,
+    const char* outputAudioFilename) {
 #ifdef WEBRTC_DUMMY_FILE_DEVICES
   RTC_DCHECK_LT(strlen(inputAudioFilename), MAX_FILENAME_LEN);
   RTC_DCHECK_LT(strlen(outputAudioFilename), MAX_FILENAME_LEN);
@@ -47,8 +48,9 @@
   _isConfigured = true;
 #else
   // Sanity: must be compiled with the right define to run this.
-  printf("Trying to use dummy file devices, but is not compiled "
-         "with WEBRTC_DUMMY_FILE_DEVICES. Bailing out.\n");
+  printf(
+      "Trying to use dummy file devices, but is not compiled "
+      "with WEBRTC_DUMMY_FILE_DEVICES. Bailing out.\n");
   std::exit(1);
 #endif
 }
diff --git a/modules/audio_device/ios/audio_device_ios.mm b/modules/audio_device/ios/audio_device_ios.mm
index 07d4660..d0a9be5 100644
--- a/modules/audio_device/ios/audio_device_ios.mm
+++ b/modules/audio_device/ios/audio_device_ios.mm
@@ -34,7 +34,6 @@
 #import "sdk/objc/Framework/Headers/WebRTC/RTCAudioSession.h"
 #import "sdk/objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h"
 
-
 namespace webrtc {
 
 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"
@@ -56,7 +55,6 @@
     }                                          \
   } while (0)
 
-
 // Hardcoded delay estimates based on real measurements.
 // TODO(henrika): these value is not used in combination with built-in AEC.
 // Can most likely be removed.
@@ -93,8 +91,8 @@
     LOG(LS_INFO) << " process ID: " << ios::GetProcessID();
     LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString();
     LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount();
-#if defined(__IPHONE_9_0) && defined(__IPHONE_OS_VERSION_MAX_ALLOWED) \
-    && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
+#if defined(__IPHONE_9_0) && defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && \
+    __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
     LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
 #endif
 #if TARGET_IPHONE_SIMULATOR
@@ -121,8 +119,7 @@
   LOGI() << "ctor" << ios::GetCurrentThreadDescription();
   io_thread_checker_.DetachFromThread();
   thread_ = rtc::Thread::Current();
-  audio_session_observer_ =
-      [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this];
+  audio_session_observer_ = [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this];
 }
 
 AudioDeviceIOS::~AudioDeviceIOS() {
@@ -152,12 +149,9 @@
   // here. They have not been set and confirmed yet since configureForWebRTC
   // is not called until audio is about to start. However, it makes sense to
   // store the parameters now and then verify at a later stage.
-  RTCAudioSessionConfiguration* config =
-      [RTCAudioSessionConfiguration webRTCConfiguration];
-  playout_parameters_.reset(config.sampleRate,
-                            config.outputNumberOfChannels);
-  record_parameters_.reset(config.sampleRate,
-                           config.inputNumberOfChannels);
+  RTCAudioSessionConfiguration* config = [RTCAudioSessionConfiguration webRTCConfiguration];
+  playout_parameters_.reset(config.sampleRate, config.outputNumberOfChannels);
+  record_parameters_.reset(config.sampleRate, config.inputNumberOfChannels);
   // Ensure that the audio device buffer (ADB) knows about the internal audio
   // parameters. Note that, even if we are unable to get a mono audio session,
   // we will always tell the I/O audio unit to do a channel format conversion
@@ -235,8 +229,7 @@
   if (fine_audio_buffer_) {
     fine_audio_buffer_->ResetPlayout();
   }
-  if (!recording_ &&
-      audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+  if (!recording_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
     if (!audio_unit_->Start()) {
       RTCLogError(@"StartPlayout failed to start audio unit.");
       return -1;
@@ -269,9 +262,8 @@
     average_number_of_playout_callbacks_between_glitches =
         num_playout_callbacks_ / num_detected_playout_glitches_;
   }
-  RTC_HISTOGRAM_COUNTS_100000(
-      "WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches",
-      average_number_of_playout_callbacks_between_glitches);
+  RTC_HISTOGRAM_COUNTS_100000("WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches",
+                              average_number_of_playout_callbacks_between_glitches);
   RTCLog(@"Average number of playout callbacks between glitches: %d",
          average_number_of_playout_callbacks_between_glitches);
   return 0;
@@ -286,8 +278,7 @@
   if (fine_audio_buffer_) {
     fine_audio_buffer_->ResetRecord();
   }
-  if (!playing_ &&
-      audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+  if (!playing_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
     if (!audio_unit_->Start()) {
       RTCLogError(@"StartRecording failed to start audio unit.");
       return -1;
@@ -333,9 +324,8 @@
     options = AVAudioSessionCategoryOptionDefaultToSpeaker;
   }
   NSError* error = nil;
-  BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
-                          withOptions:options
-                                error:&error];
+  BOOL success =
+      [session setCategory:AVAudioSessionCategoryPlayAndRecord withOptions:options error:&error];
   ios::CheckAndLogError(success, error);
   [session unlockForConfiguration];
   return (error == nil) ? 0 : -1;
@@ -389,7 +379,9 @@
 
 void AudioDeviceIOS::OnCanPlayOrRecordChange(bool can_play_or_record) {
   RTC_DCHECK(thread_);
-  thread_->Post(RTC_FROM_HERE, this, kMessageTypeCanPlayOrRecordChange,
+  thread_->Post(RTC_FROM_HERE,
+                this,
+                kMessageTypeCanPlayOrRecordChange,
                 new rtc::TypedMessageData<bool>(can_play_or_record));
 }
 
@@ -406,11 +398,9 @@
   RTC_DCHECK_RUN_ON(&io_thread_checker_);
   OSStatus result = noErr;
   // Simply return if recording is not enabled.
-  if (!rtc::AtomicOps::AcquireLoad(&recording_))
-    return result;
+  if (!rtc::AtomicOps::AcquireLoad(&recording_)) return result;
 
-  const size_t num_bytes =
-      num_frames * VoiceProcessingAudioUnit::kBytesPerSample;
+  const size_t num_bytes = num_frames * VoiceProcessingAudioUnit::kBytesPerSample;
   // Set the size of our own audio buffer and clear it first to avoid copying
   // in combination with potential reallocations.
   // On real iOS devices, the size will only be set once (at first callback).
@@ -435,8 +425,7 @@
   // We can make the audio unit provide a buffer instead in io_data, but we
   // currently just use our own.
   // TODO(henrika): should error handling be improved?
-  result = audio_unit_->Render(
-      flags, time_stamp, bus_number, num_frames, &audio_buffer_list);
+  result = audio_unit_->Render(flags, time_stamp, bus_number, num_frames, &audio_buffer_list);
   if (result != noErr) {
     RTCLogError(@"Failed to render audio.");
     return result;
@@ -445,9 +434,8 @@
   // Get a pointer to the recorded audio and send it to the WebRTC ADB.
   // Use the FineAudioBuffer instance to convert between native buffer size
   // and the 10ms buffer size used by WebRTC.
-  fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_,
-                                          kFixedPlayoutDelayEstimate,
-                                          kFixedRecordDelayEstimate);
+  fine_audio_buffer_->DeliverRecordedData(
+      record_audio_buffer_, kFixedPlayoutDelayEstimate, kFixedRecordDelayEstimate);
   return noErr;
 }
 
@@ -465,8 +453,7 @@
   // Get pointer to internal audio buffer to which new audio data shall be
   // written.
   const size_t size_in_bytes = audio_buffer->mDataByteSize;
-  RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample,
-               num_frames);
+  RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames);
   int8_t* destination = reinterpret_cast<int8_t*>(audio_buffer->mData);
   // Produce silence and give audio unit a hint about it if playout is not
   // activated.
@@ -508,12 +495,11 @@
   // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
   // the native I/O audio unit) and copy the result to the audio buffer in the
   // |io_data| destination.
-  fine_audio_buffer_->GetPlayoutData(
-      rtc::ArrayView<int8_t>(destination, size_in_bytes));
+  fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<int8_t>(destination, size_in_bytes));
   return noErr;
 }
 
-void AudioDeviceIOS::OnMessage(rtc::Message *msg) {
+void AudioDeviceIOS::OnMessage(rtc::Message* msg) {
   switch (msg->message_id) {
     case kMessageTypeInterruptionBegin:
       HandleInterruptionBegin();
@@ -525,8 +511,7 @@
       HandleValidRouteChange();
       break;
     case kMessageTypeCanPlayOrRecordChange: {
-      rtc::TypedMessageData<bool>* data =
-          static_cast<rtc::TypedMessageData<bool>*>(msg->pdata);
+      rtc::TypedMessageData<bool>* data = static_cast<rtc::TypedMessageData<bool>*>(msg->pdata);
       HandleCanPlayOrRecordChange(data->data());
       delete data;
       break;
@@ -542,10 +527,8 @@
 
 void AudioDeviceIOS::HandleInterruptionBegin() {
   RTC_DCHECK_RUN_ON(&thread_checker_);
-  RTCLog(@"Interruption begin. IsInterrupted changed from %d to 1.",
-         is_interrupted_);
-  if (audio_unit_ &&
-      audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
+  RTCLog(@"Interruption begin. IsInterrupted changed from %d to 1.", is_interrupted_);
+  if (audio_unit_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
     RTCLog(@"Stopping the audio unit due to interruption begin.");
     if (!audio_unit_->Stop()) {
       RTCLogError(@"Failed to stop the audio unit for interruption begin.");
@@ -566,7 +549,8 @@
 void AudioDeviceIOS::HandleInterruptionEnd() {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   RTCLog(@"Interruption ended. IsInterrupted changed from %d to 0. "
-         "Updating audio unit state.", is_interrupted_);
+          "Updating audio unit state.",
+         is_interrupted_);
   is_interrupted_ = false;
   UpdateAudioUnit([RTCAudioSession sharedInstance].canPlayOrRecord);
 }
@@ -589,15 +573,13 @@
 
   // Don't do anything if we're interrupted.
   if (is_interrupted_) {
-    RTCLog(@"Ignoring sample rate change to %f due to interruption.",
-           sample_rate);
+    RTCLog(@"Ignoring sample rate change to %f due to interruption.", sample_rate);
     return;
   }
 
   // If we don't have an audio unit yet, or the audio unit is uninitialized,
   // there is no work to do.
-  if (!audio_unit_ ||
-      audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
+  if (!audio_unit_ || audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
     return;
   }
 
@@ -609,8 +591,7 @@
   const size_t session_frames_per_buffer =
       static_cast<size_t>(session_sample_rate * session_buffer_duration + .5);
   const double current_sample_rate = playout_parameters_.sample_rate();
-  const size_t current_frames_per_buffer =
-      playout_parameters_.frames_per_buffer();
+  const size_t current_frames_per_buffer = playout_parameters_.frames_per_buffer();
   RTCLog(@"Handling playout sample rate change to: %f\n"
           "  Session sample rate: %f frames_per_buffer: %lu\n"
           "  ADM sample rate: %f frames_per_buffer: %lu",
@@ -652,15 +633,13 @@
   // Initialize the audio unit again with the new sample rate.
   RTC_DCHECK_EQ(playout_parameters_.sample_rate(), session_sample_rate);
   if (!audio_unit_->Initialize(session_sample_rate)) {
-    RTCLogError(@"Failed to initialize the audio unit with sample rate: %f",
-                session_sample_rate);
+    RTCLogError(@"Failed to initialize the audio unit with sample rate: %f", session_sample_rate);
     return;
   }
 
   // Restart the audio unit if it was already running.
   if (restart_audio_unit && !audio_unit_->Start()) {
-    RTCLogError(@"Failed to start audio unit with sample rate: %f",
-                session_sample_rate);
+    RTCLogError(@"Failed to start audio unit with sample rate: %f", session_sample_rate);
     return;
   }
   RTCLog(@"Successfully handled sample rate change.");
@@ -682,8 +661,7 @@
     return;
   }
   num_detected_playout_glitches_++;
-  RTCLog(@"Number of detected playout glitches: %lld",
-         num_detected_playout_glitches_);
+  RTCLog(@"Number of detected playout glitches: %lld", num_detected_playout_glitches_);
 
   int64_t glitch_count = num_detected_playout_glitches_;
   dispatch_async(dispatch_get_main_queue(), ^{
@@ -712,8 +690,7 @@
   // Inform the audio device buffer (ADB) about the new audio format.
   audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
   audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
-  audio_device_buffer_->SetRecordingSampleRate(
-      record_parameters_.sample_rate());
+  audio_device_buffer_->SetRecordingSampleRate(record_parameters_.sample_rate());
   audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
 }
 
@@ -729,8 +706,7 @@
   // hardware sample rate but continue and use the non-ideal sample rate after
   // reinitializing the audio parameters. Most BT headsets only support 8kHz or
   // 16kHz.
-  RTCAudioSessionConfiguration* webRTCConfig =
-      [RTCAudioSessionConfiguration webRTCConfiguration];
+  RTCAudioSessionConfiguration* webRTCConfig = [RTCAudioSessionConfiguration webRTCConfiguration];
   if (sample_rate != webRTCConfig.sampleRate) {
     LOG(LS_WARNING) << "Unable to set the preferred sample rate";
   }
@@ -740,18 +716,13 @@
   // number of audio frames.
   // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
   // Hence, 128 is the size we expect to see in upcoming render callbacks.
-  playout_parameters_.reset(sample_rate, playout_parameters_.channels(),
-                            io_buffer_duration);
+  playout_parameters_.reset(sample_rate, playout_parameters_.channels(), io_buffer_duration);
   RTC_DCHECK(playout_parameters_.is_complete());
-  record_parameters_.reset(sample_rate, record_parameters_.channels(),
-                           io_buffer_duration);
+  record_parameters_.reset(sample_rate, record_parameters_.channels(), io_buffer_duration);
   RTC_DCHECK(record_parameters_.is_complete());
-  LOG(LS_INFO) << " frames per I/O buffer: "
-               << playout_parameters_.frames_per_buffer();
-  LOG(LS_INFO) << " bytes per I/O buffer: "
-               << playout_parameters_.GetBytesPerBuffer();
-  RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(),
-                record_parameters_.GetBytesPerBuffer());
+  LOG(LS_INFO) << " frames per I/O buffer: " << playout_parameters_.frames_per_buffer();
+  LOG(LS_INFO) << " bytes per I/O buffer: " << playout_parameters_.GetBytesPerBuffer();
+  RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(), record_parameters_.GetBytesPerBuffer());
 
   // Update the ADB parameters since the sample rate might have changed.
   UpdateAudioDeviceBuffer();
@@ -781,7 +752,8 @@
 void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   RTCLog(@"Updating audio unit state. CanPlayOrRecord=%d IsInterrupted=%d",
-         can_play_or_record, is_interrupted_);
+         can_play_or_record,
+         is_interrupted_);
 
   if (is_interrupted_) {
     RTCLog(@"Ignoring audio unit update due to interruption.");
@@ -790,8 +762,7 @@
 
   // If we're not initialized we don't need to do anything. Audio unit will
   // be initialized on initialization.
-  if (!audio_is_initialized_)
-    return;
+  if (!audio_is_initialized_) return;
 
   // If we're initialized, we must have an audio unit.
   RTC_DCHECK(audio_unit_);
@@ -809,13 +780,11 @@
     case VoiceProcessingAudioUnit::kUninitialized:
       RTCLog(@"VPAU state: Uninitialized");
       should_initialize_audio_unit = can_play_or_record;
-      should_start_audio_unit = should_initialize_audio_unit &&
-          (playing_ || recording_);
+      should_start_audio_unit = should_initialize_audio_unit && (playing_ || recording_);
       break;
     case VoiceProcessingAudioUnit::kInitialized:
       RTCLog(@"VPAU state: Initialized");
-      should_start_audio_unit =
-          can_play_or_record && (playing_ || recording_);
+      should_start_audio_unit = can_play_or_record && (playing_ || recording_);
       should_uninitialize_audio_unit = !can_play_or_record;
       break;
     case VoiceProcessingAudioUnit::kStarted:
@@ -916,8 +885,7 @@
   NSError* error = nil;
   if (![session beginWebRTCSession:&error]) {
     [session unlockForConfiguration];
-    RTCLogError(@"Failed to begin WebRTC session: %@",
-                error.localizedDescription);
+    RTCLogError(@"Failed to begin WebRTC session: %@", error.localizedDescription);
     return false;
   }
 
diff --git a/modules/audio_device/ios/audio_device_not_implemented_ios.mm b/modules/audio_device/ios/audio_device_not_implemented_ios.mm
index 6dfc02b..4de2653 100644
--- a/modules/audio_device/ios/audio_device_not_implemented_ios.mm
+++ b/modules/audio_device/ios/audio_device_not_implemented_ios.mm
@@ -15,8 +15,7 @@
 
 namespace webrtc {
 
-int32_t AudioDeviceIOS::ActiveAudioLayer(
-    AudioDeviceModule::AudioLayer& audioLayer) const {
+int32_t AudioDeviceIOS::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const {
   audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
   return 0;
 }
@@ -199,8 +198,7 @@
   return 0;
 }
 
-int32_t AudioDeviceIOS::SetRecordingDevice(
-    AudioDeviceModule::WindowsDeviceType) {
+int32_t AudioDeviceIOS::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType) {
   RTC_NOTREACHED() << "Not implemented";
   return -1;
 }
diff --git a/modules/audio_device/linux/audio_device_alsa_linux.cc b/modules/audio_device/linux/audio_device_alsa_linux.cc
index 0a98efd..0cc484f 100644
--- a/modules/audio_device/linux/audio_device_alsa_linux.cc
+++ b/modules/audio_device/linux/audio_device_alsa_linux.cc
@@ -26,119 +26,115 @@
 
 // Redefine these here to be able to do late-binding
 #undef snd_ctl_card_info_alloca
-#define snd_ctl_card_info_alloca(ptr) \
-        do { *ptr = (snd_ctl_card_info_t *) \
-            __builtin_alloca (LATE(snd_ctl_card_info_sizeof)()); \
-            memset(*ptr, 0, LATE(snd_ctl_card_info_sizeof)()); } while (0)
+#define snd_ctl_card_info_alloca(ptr)                  \
+  do {                                                 \
+    *ptr = (snd_ctl_card_info_t*)__builtin_alloca(     \
+        LATE(snd_ctl_card_info_sizeof)());             \
+    memset(*ptr, 0, LATE(snd_ctl_card_info_sizeof)()); \
+  } while (0)
 
 #undef snd_pcm_info_alloca
-#define snd_pcm_info_alloca(pInfo) \
-       do { *pInfo = (snd_pcm_info_t *) \
-       __builtin_alloca (LATE(snd_pcm_info_sizeof)()); \
-       memset(*pInfo, 0, LATE(snd_pcm_info_sizeof)()); } while (0)
+#define snd_pcm_info_alloca(pInfo)                                           \
+  do {                                                                       \
+    *pInfo = (snd_pcm_info_t*)__builtin_alloca(LATE(snd_pcm_info_sizeof)()); \
+    memset(*pInfo, 0, LATE(snd_pcm_info_sizeof)());                          \
+  } while (0)
 
 // snd_lib_error_handler_t
-void WebrtcAlsaErrorHandler(const char *file,
-                          int line,
-                          const char *function,
-                          int err,
-                          const char *fmt,...){};
+void WebrtcAlsaErrorHandler(const char* file,
+                            int line,
+                            const char* function,
+                            int err,
+                            const char* fmt,
+                            ...){};
 
-namespace webrtc
-{
+namespace webrtc {
 static const unsigned int ALSA_PLAYOUT_FREQ = 48000;
 static const unsigned int ALSA_PLAYOUT_CH = 2;
-static const unsigned int ALSA_PLAYOUT_LATENCY = 40*1000; // in us
+static const unsigned int ALSA_PLAYOUT_LATENCY = 40 * 1000;  // in us
 static const unsigned int ALSA_CAPTURE_FREQ = 48000;
 static const unsigned int ALSA_CAPTURE_CH = 2;
-static const unsigned int ALSA_CAPTURE_LATENCY = 40*1000; // in us
-static const unsigned int ALSA_CAPTURE_WAIT_TIMEOUT = 5; // in ms
+static const unsigned int ALSA_CAPTURE_LATENCY = 40 * 1000;  // in us
+static const unsigned int ALSA_CAPTURE_WAIT_TIMEOUT = 5;     // in ms
 
 #define FUNC_GET_NUM_OF_DEVICE 0
 #define FUNC_GET_DEVICE_NAME 1
 #define FUNC_GET_DEVICE_NAME_FOR_AN_ENUM 2
 
-AudioDeviceLinuxALSA::AudioDeviceLinuxALSA() :
-    _ptrAudioBuffer(NULL),
-    _inputDeviceIndex(0),
-    _outputDeviceIndex(0),
-    _inputDeviceIsSpecified(false),
-    _outputDeviceIsSpecified(false),
-    _handleRecord(NULL),
-    _handlePlayout(NULL),
-    _recordingBuffersizeInFrame(0),
-    _recordingPeriodSizeInFrame(0),
-    _playoutBufferSizeInFrame(0),
-    _playoutPeriodSizeInFrame(0),
-    _recordingBufferSizeIn10MS(0),
-    _playoutBufferSizeIn10MS(0),
-    _recordingFramesIn10MS(0),
-    _playoutFramesIn10MS(0),
-    _recordingFreq(ALSA_CAPTURE_FREQ),
-    _playoutFreq(ALSA_PLAYOUT_FREQ),
-    _recChannels(ALSA_CAPTURE_CH),
-    _playChannels(ALSA_PLAYOUT_CH),
-    _recordingBuffer(NULL),
-    _playoutBuffer(NULL),
-    _recordingFramesLeft(0),
-    _playoutFramesLeft(0),
-    _initialized(false),
-    _recording(false),
-    _playing(false),
-    _recIsInitialized(false),
-    _playIsInitialized(false),
-    _AGC(false),
-    _recordingDelay(0),
-    _playoutDelay(0)
-{
-    memset(_oldKeyState, 0, sizeof(_oldKeyState));
-    LOG(LS_INFO) << __FUNCTION__ << " created";
+AudioDeviceLinuxALSA::AudioDeviceLinuxALSA()
+    : _ptrAudioBuffer(NULL),
+      _inputDeviceIndex(0),
+      _outputDeviceIndex(0),
+      _inputDeviceIsSpecified(false),
+      _outputDeviceIsSpecified(false),
+      _handleRecord(NULL),
+      _handlePlayout(NULL),
+      _recordingBuffersizeInFrame(0),
+      _recordingPeriodSizeInFrame(0),
+      _playoutBufferSizeInFrame(0),
+      _playoutPeriodSizeInFrame(0),
+      _recordingBufferSizeIn10MS(0),
+      _playoutBufferSizeIn10MS(0),
+      _recordingFramesIn10MS(0),
+      _playoutFramesIn10MS(0),
+      _recordingFreq(ALSA_CAPTURE_FREQ),
+      _playoutFreq(ALSA_PLAYOUT_FREQ),
+      _recChannels(ALSA_CAPTURE_CH),
+      _playChannels(ALSA_PLAYOUT_CH),
+      _recordingBuffer(NULL),
+      _playoutBuffer(NULL),
+      _recordingFramesLeft(0),
+      _playoutFramesLeft(0),
+      _initialized(false),
+      _recording(false),
+      _playing(false),
+      _recIsInitialized(false),
+      _playIsInitialized(false),
+      _AGC(false),
+      _recordingDelay(0),
+      _playoutDelay(0) {
+  memset(_oldKeyState, 0, sizeof(_oldKeyState));
+  LOG(LS_INFO) << __FUNCTION__ << " created";
 }
 
 // ----------------------------------------------------------------------------
 //  AudioDeviceLinuxALSA - dtor
 // ----------------------------------------------------------------------------
 
-AudioDeviceLinuxALSA::~AudioDeviceLinuxALSA()
-{
-    LOG(LS_INFO) << __FUNCTION__ << " destroyed";
+AudioDeviceLinuxALSA::~AudioDeviceLinuxALSA() {
+  LOG(LS_INFO) << __FUNCTION__ << " destroyed";
 
-    Terminate();
+  Terminate();
 
-    // Clean up the recording buffer and playout buffer.
-    if (_recordingBuffer)
-    {
-        delete [] _recordingBuffer;
-        _recordingBuffer = NULL;
-    }
-    if (_playoutBuffer)
-    {
-        delete [] _playoutBuffer;
-        _playoutBuffer = NULL;
-    }
+  // Clean up the recording buffer and playout buffer.
+  if (_recordingBuffer) {
+    delete[] _recordingBuffer;
+    _recordingBuffer = NULL;
+  }
+  if (_playoutBuffer) {
+    delete[] _playoutBuffer;
+    _playoutBuffer = NULL;
+  }
 }
 
-void AudioDeviceLinuxALSA::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
-{
+void AudioDeviceLinuxALSA::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  _ptrAudioBuffer = audioBuffer;
 
-    _ptrAudioBuffer = audioBuffer;
-
-    // Inform the AudioBuffer about default settings for this implementation.
-    // Set all values to zero here since the actual settings will be done by
-    // InitPlayout and InitRecording later.
-    _ptrAudioBuffer->SetRecordingSampleRate(0);
-    _ptrAudioBuffer->SetPlayoutSampleRate(0);
-    _ptrAudioBuffer->SetRecordingChannels(0);
-    _ptrAudioBuffer->SetPlayoutChannels(0);
+  // Inform the AudioBuffer about default settings for this implementation.
+  // Set all values to zero here since the actual settings will be done by
+  // InitPlayout and InitRecording later.
+  _ptrAudioBuffer->SetRecordingSampleRate(0);
+  _ptrAudioBuffer->SetPlayoutSampleRate(0);
+  _ptrAudioBuffer->SetRecordingChannels(0);
+  _ptrAudioBuffer->SetPlayoutChannels(0);
 }
 
 int32_t AudioDeviceLinuxALSA::ActiveAudioLayer(
-    AudioDeviceModule::AudioLayer& audioLayer) const
-{
-    audioLayer = AudioDeviceModule::kLinuxAlsaAudio;
-    return 0;
+    AudioDeviceModule::AudioLayer& audioLayer) const {
+  audioLayer = AudioDeviceModule::kLinuxAlsaAudio;
+  return 0;
 }
 
 AudioDeviceGeneric::InitStatus AudioDeviceLinuxALSA::Init() {
@@ -155,654 +151,541 @@
     return InitStatus::OK;
   }
 #if defined(USE_X11)
-    //Get X display handle for typing detection
-    _XDisplay = XOpenDisplay(NULL);
-    if (!_XDisplay) {
-      LOG(LS_WARNING)
-          << "failed to open X display, typing detection will not work";
-    }
+  // Get X display handle for typing detection
+  _XDisplay = XOpenDisplay(NULL);
+  if (!_XDisplay) {
+    LOG(LS_WARNING)
+        << "failed to open X display, typing detection will not work";
+  }
 #endif
 
-    _initialized = true;
+  _initialized = true;
 
-    return InitStatus::OK;
+  return InitStatus::OK;
 }
 
-int32_t AudioDeviceLinuxALSA::Terminate()
-{
-    if (!_initialized)
-    {
-        return 0;
-    }
+int32_t AudioDeviceLinuxALSA::Terminate() {
+  if (!_initialized) {
+    return 0;
+  }
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    _mixerManager.Close();
+  _mixerManager.Close();
 
-    // RECORDING
-    if (_ptrThreadRec)
-    {
-        rtc::PlatformThread* tmpThread = _ptrThreadRec.release();
-        _critSect.Leave();
+  // RECORDING
+  if (_ptrThreadRec) {
+    rtc::PlatformThread* tmpThread = _ptrThreadRec.release();
+    _critSect.Leave();
 
-        tmpThread->Stop();
-        delete tmpThread;
+    tmpThread->Stop();
+    delete tmpThread;
 
-        _critSect.Enter();
-    }
+    _critSect.Enter();
+  }
 
-    // PLAYOUT
-    if (_ptrThreadPlay)
-    {
-        rtc::PlatformThread* tmpThread = _ptrThreadPlay.release();
-        _critSect.Leave();
+  // PLAYOUT
+  if (_ptrThreadPlay) {
+    rtc::PlatformThread* tmpThread = _ptrThreadPlay.release();
+    _critSect.Leave();
 
-        tmpThread->Stop();
-        delete tmpThread;
+    tmpThread->Stop();
+    delete tmpThread;
 
-        _critSect.Enter();
-    }
+    _critSect.Enter();
+  }
 #if defined(USE_X11)
-    if (_XDisplay)
-    {
-      XCloseDisplay(_XDisplay);
-      _XDisplay = NULL;
-    }
+  if (_XDisplay) {
+    XCloseDisplay(_XDisplay);
+    _XDisplay = NULL;
+  }
 #endif
-    _initialized = false;
-    _outputDeviceIsSpecified = false;
-    _inputDeviceIsSpecified = false;
+  _initialized = false;
+  _outputDeviceIsSpecified = false;
+  _inputDeviceIsSpecified = false;
 
+  return 0;
+}
+
+bool AudioDeviceLinuxALSA::Initialized() const {
+  return (_initialized);
+}
+
+int32_t AudioDeviceLinuxALSA::InitSpeaker() {
+  rtc::CritScope lock(&_critSect);
+
+  if (_playing) {
+    return -1;
+  }
+
+  char devName[kAdmMaxDeviceNameSize] = {0};
+  GetDevicesInfo(2, true, _outputDeviceIndex, devName, kAdmMaxDeviceNameSize);
+  return _mixerManager.OpenSpeaker(devName);
+}
+
+int32_t AudioDeviceLinuxALSA::InitMicrophone() {
+  rtc::CritScope lock(&_critSect);
+
+  if (_recording) {
+    return -1;
+  }
+
+  char devName[kAdmMaxDeviceNameSize] = {0};
+  GetDevicesInfo(2, false, _inputDeviceIndex, devName, kAdmMaxDeviceNameSize);
+  return _mixerManager.OpenMicrophone(devName);
+}
+
+bool AudioDeviceLinuxALSA::SpeakerIsInitialized() const {
+  return (_mixerManager.SpeakerIsInitialized());
+}
+
+bool AudioDeviceLinuxALSA::MicrophoneIsInitialized() const {
+  return (_mixerManager.MicrophoneIsInitialized());
+}
+
+int32_t AudioDeviceLinuxALSA::SpeakerVolumeIsAvailable(bool& available) {
+  bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+  // Make an attempt to open up the
+  // output mixer corresponding to the currently selected output device.
+  if (!wasInitialized && InitSpeaker() == -1) {
+    // If we end up here it means that the selected speaker has no volume
+    // control.
+    available = false;
     return 0;
+  }
+
+  // Given that InitSpeaker was successful, we know that a volume control
+  // exists
+  available = true;
+
+  // Close the initialized output mixer
+  if (!wasInitialized) {
+    _mixerManager.CloseSpeaker();
+  }
+
+  return 0;
 }
 
-bool AudioDeviceLinuxALSA::Initialized() const
-{
-    return (_initialized);
+int32_t AudioDeviceLinuxALSA::SetSpeakerVolume(uint32_t volume) {
+  return (_mixerManager.SetSpeakerVolume(volume));
 }
 
-int32_t AudioDeviceLinuxALSA::InitSpeaker()
-{
+int32_t AudioDeviceLinuxALSA::SpeakerVolume(uint32_t& volume) const {
+  uint32_t level(0);
 
-    rtc::CritScope lock(&_critSect);
+  if (_mixerManager.SpeakerVolume(level) == -1) {
+    return -1;
+  }
 
-    if (_playing)
-    {
-        return -1;
-    }
+  volume = level;
 
-    char devName[kAdmMaxDeviceNameSize] = {0};
-    GetDevicesInfo(2, true, _outputDeviceIndex, devName, kAdmMaxDeviceNameSize);
-    return _mixerManager.OpenSpeaker(devName);
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::InitMicrophone()
-{
+int32_t AudioDeviceLinuxALSA::MaxSpeakerVolume(uint32_t& maxVolume) const {
+  uint32_t maxVol(0);
 
-    rtc::CritScope lock(&_critSect);
+  if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
+    return -1;
+  }
 
-    if (_recording)
-    {
-        return -1;
-    }
+  maxVolume = maxVol;
 
-    char devName[kAdmMaxDeviceNameSize] = {0};
-    GetDevicesInfo(2, false, _inputDeviceIndex, devName, kAdmMaxDeviceNameSize);
-    return _mixerManager.OpenMicrophone(devName);
+  return 0;
 }
 
-bool AudioDeviceLinuxALSA::SpeakerIsInitialized() const
-{
-    return (_mixerManager.SpeakerIsInitialized());
+int32_t AudioDeviceLinuxALSA::MinSpeakerVolume(uint32_t& minVolume) const {
+  uint32_t minVol(0);
+
+  if (_mixerManager.MinSpeakerVolume(minVol) == -1) {
+    return -1;
+  }
+
+  minVolume = minVol;
+
+  return 0;
 }
 
-bool AudioDeviceLinuxALSA::MicrophoneIsInitialized() const
-{
-    return (_mixerManager.MicrophoneIsInitialized());
-}
+int32_t AudioDeviceLinuxALSA::SpeakerMuteIsAvailable(bool& available) {
+  bool isAvailable(false);
+  bool wasInitialized = _mixerManager.SpeakerIsInitialized();
 
-int32_t AudioDeviceLinuxALSA::SpeakerVolumeIsAvailable(bool& available)
-{
-
-    bool wasInitialized = _mixerManager.SpeakerIsInitialized();
-
-    // Make an attempt to open up the
-    // output mixer corresponding to the currently selected output device.
-    if (!wasInitialized && InitSpeaker() == -1)
-    {
-        // If we end up here it means that the selected speaker has no volume
-        // control.
-        available = false;
-        return 0;
-    }
-
-    // Given that InitSpeaker was successful, we know that a volume control
-    // exists
-    available = true;
-
-    // Close the initialized output mixer
-    if (!wasInitialized)
-    {
-        _mixerManager.CloseSpeaker();
-    }
-
+  // Make an attempt to open up the
+  // output mixer corresponding to the currently selected output device.
+  //
+  if (!wasInitialized && InitSpeaker() == -1) {
+    // If we end up here it means that the selected speaker has no volume
+    // control, hence it is safe to state that there is no mute control
+    // already at this stage.
+    available = false;
     return 0;
+  }
+
+  // Check if the selected speaker has a mute control
+  _mixerManager.SpeakerMuteIsAvailable(isAvailable);
+
+  available = isAvailable;
+
+  // Close the initialized output mixer
+  if (!wasInitialized) {
+    _mixerManager.CloseSpeaker();
+  }
+
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::SetSpeakerVolume(uint32_t volume)
-{
-
-    return (_mixerManager.SetSpeakerVolume(volume));
+int32_t AudioDeviceLinuxALSA::SetSpeakerMute(bool enable) {
+  return (_mixerManager.SetSpeakerMute(enable));
 }
 
-int32_t AudioDeviceLinuxALSA::SpeakerVolume(uint32_t& volume) const
-{
+int32_t AudioDeviceLinuxALSA::SpeakerMute(bool& enabled) const {
+  bool muted(0);
 
-    uint32_t level(0);
+  if (_mixerManager.SpeakerMute(muted) == -1) {
+    return -1;
+  }
 
-    if (_mixerManager.SpeakerVolume(level) == -1)
-    {
-        return -1;
-    }
+  enabled = muted;
 
-    volume = level;
+  return 0;
+}
 
+int32_t AudioDeviceLinuxALSA::MicrophoneMuteIsAvailable(bool& available) {
+  bool isAvailable(false);
+  bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+  // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+  //
+  if (!wasInitialized && InitMicrophone() == -1) {
+    // If we end up here it means that the selected microphone has no volume
+    // control, hence it is safe to state that there is no mute control
+    // already at this stage.
+    available = false;
     return 0;
+  }
+
+  // Check if the selected microphone has a mute control
+  //
+  _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+  available = isAvailable;
+
+  // Close the initialized input mixer
+  //
+  if (!wasInitialized) {
+    _mixerManager.CloseMicrophone();
+  }
+
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::MaxSpeakerVolume(
-    uint32_t& maxVolume) const
-{
-
-    uint32_t maxVol(0);
-
-    if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
-    {
-        return -1;
-    }
-
-    maxVolume = maxVol;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::MinSpeakerVolume(
-    uint32_t& minVolume) const
-{
-
-    uint32_t minVol(0);
-
-    if (_mixerManager.MinSpeakerVolume(minVol) == -1)
-    {
-        return -1;
-    }
-
-    minVolume = minVol;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::SpeakerMuteIsAvailable(bool& available)
-{
-
-    bool isAvailable(false);
-    bool wasInitialized = _mixerManager.SpeakerIsInitialized();
-
-    // Make an attempt to open up the
-    // output mixer corresponding to the currently selected output device.
-    //
-    if (!wasInitialized && InitSpeaker() == -1)
-    {
-        // If we end up here it means that the selected speaker has no volume
-        // control, hence it is safe to state that there is no mute control
-        // already at this stage.
-        available = false;
-        return 0;
-    }
-
-    // Check if the selected speaker has a mute control
-    _mixerManager.SpeakerMuteIsAvailable(isAvailable);
-
-    available = isAvailable;
-
-    // Close the initialized output mixer
-    if (!wasInitialized)
-    {
-        _mixerManager.CloseSpeaker();
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::SetSpeakerMute(bool enable)
-{
-    return (_mixerManager.SetSpeakerMute(enable));
-}
-
-int32_t AudioDeviceLinuxALSA::SpeakerMute(bool& enabled) const
-{
-
-    bool muted(0);
-
-    if (_mixerManager.SpeakerMute(muted) == -1)
-    {
-        return -1;
-    }
-
-    enabled = muted;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::MicrophoneMuteIsAvailable(bool& available)
-{
-
-    bool isAvailable(false);
-    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
-
-    // Make an attempt to open up the
-    // input mixer corresponding to the currently selected input device.
-    //
-    if (!wasInitialized && InitMicrophone() == -1)
-    {
-        // If we end up here it means that the selected microphone has no volume
-        // control, hence it is safe to state that there is no mute control
-        // already at this stage.
-        available = false;
-        return 0;
-    }
-
-    // Check if the selected microphone has a mute control
-    //
-    _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
-    available = isAvailable;
-
-    // Close the initialized input mixer
-    //
-    if (!wasInitialized)
-    {
-        _mixerManager.CloseMicrophone();
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::SetMicrophoneMute(bool enable)
-{
-    return (_mixerManager.SetMicrophoneMute(enable));
+int32_t AudioDeviceLinuxALSA::SetMicrophoneMute(bool enable) {
+  return (_mixerManager.SetMicrophoneMute(enable));
 }
 
 // ----------------------------------------------------------------------------
 //  MicrophoneMute
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceLinuxALSA::MicrophoneMute(bool& enabled) const
-{
+int32_t AudioDeviceLinuxALSA::MicrophoneMute(bool& enabled) const {
+  bool muted(0);
 
-    bool muted(0);
+  if (_mixerManager.MicrophoneMute(muted) == -1) {
+    return -1;
+  }
 
-    if (_mixerManager.MicrophoneMute(muted) == -1)
-    {
-        return -1;
-    }
-
-    enabled = muted;
-    return 0;
+  enabled = muted;
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::StereoRecordingIsAvailable(bool& available)
-{
+int32_t AudioDeviceLinuxALSA::StereoRecordingIsAvailable(bool& available) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
-
-    // If we already have initialized in stereo it's obviously available
-    if (_recIsInitialized && (2 == _recChannels))
-    {
-        available = true;
-        return 0;
-    }
-
-    // Save rec states and the number of rec channels
-    bool recIsInitialized = _recIsInitialized;
-    bool recording = _recording;
-    int recChannels = _recChannels;
-
-    available = false;
-
-    // Stop/uninitialize recording if initialized (and possibly started)
-    if (_recIsInitialized)
-    {
-        StopRecording();
-    }
-
-    // Try init in stereo;
-    _recChannels = 2;
-    if (InitRecording() == 0)
-    {
-        available = true;
-    }
-
-    // Stop/uninitialize recording
-    StopRecording();
-
-    // Recover previous states
-    _recChannels = recChannels;
-    if (recIsInitialized)
-    {
-        InitRecording();
-    }
-    if (recording)
-    {
-        StartRecording();
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::SetStereoRecording(bool enable)
-{
-
-    if (enable)
-        _recChannels = 2;
-    else
-        _recChannels = 1;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::StereoRecording(bool& enabled) const
-{
-
-    if (_recChannels == 2)
-        enabled = true;
-    else
-        enabled = false;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::StereoPlayoutIsAvailable(bool& available)
-{
-
-    rtc::CritScope lock(&_critSect);
-
-    // If we already have initialized in stereo it's obviously available
-    if (_playIsInitialized && (2 == _playChannels))
-    {
-        available = true;
-        return 0;
-    }
-
-    // Save rec states and the number of rec channels
-    bool playIsInitialized = _playIsInitialized;
-    bool playing = _playing;
-    int playChannels = _playChannels;
-
-    available = false;
-
-    // Stop/uninitialize recording if initialized (and possibly started)
-    if (_playIsInitialized)
-    {
-        StopPlayout();
-    }
-
-    // Try init in stereo;
-    _playChannels = 2;
-    if (InitPlayout() == 0)
-    {
-        available = true;
-    }
-
-    // Stop/uninitialize recording
-    StopPlayout();
-
-    // Recover previous states
-    _playChannels = playChannels;
-    if (playIsInitialized)
-    {
-        InitPlayout();
-    }
-    if (playing)
-    {
-        StartPlayout();
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::SetStereoPlayout(bool enable)
-{
-
-    if (enable)
-        _playChannels = 2;
-    else
-        _playChannels = 1;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::StereoPlayout(bool& enabled) const
-{
-
-    if (_playChannels == 2)
-        enabled = true;
-    else
-        enabled = false;
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::SetAGC(bool enable)
-{
-
-    _AGC = enable;
-
-    return 0;
-}
-
-bool AudioDeviceLinuxALSA::AGC() const
-{
-
-    return _AGC;
-}
-
-int32_t AudioDeviceLinuxALSA::MicrophoneVolumeIsAvailable(bool& available)
-{
-
-    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
-
-    // Make an attempt to open up the
-    // input mixer corresponding to the currently selected output device.
-    if (!wasInitialized && InitMicrophone() == -1)
-    {
-        // If we end up here it means that the selected microphone has no volume
-        // control.
-        available = false;
-        return 0;
-    }
-
-    // Given that InitMicrophone was successful, we know that a volume control
-    // exists
+  // If we already have initialized in stereo it's obviously available
+  if (_recIsInitialized && (2 == _recChannels)) {
     available = true;
-
-    // Close the initialized input mixer
-    if (!wasInitialized)
-    {
-        _mixerManager.CloseMicrophone();
-    }
-
     return 0;
+  }
+
+  // Save rec states and the number of rec channels
+  bool recIsInitialized = _recIsInitialized;
+  bool recording = _recording;
+  int recChannels = _recChannels;
+
+  available = false;
+
+  // Stop/uninitialize recording if initialized (and possibly started)
+  if (_recIsInitialized) {
+    StopRecording();
+  }
+
+  // Try init in stereo;
+  _recChannels = 2;
+  if (InitRecording() == 0) {
+    available = true;
+  }
+
+  // Stop/uninitialize recording
+  StopRecording();
+
+  // Recover previous states
+  _recChannels = recChannels;
+  if (recIsInitialized) {
+    InitRecording();
+  }
+  if (recording) {
+    StartRecording();
+  }
+
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::SetMicrophoneVolume(uint32_t volume)
-{
+int32_t AudioDeviceLinuxALSA::SetStereoRecording(bool enable) {
+  if (enable)
+    _recChannels = 2;
+  else
+    _recChannels = 1;
 
-    return (_mixerManager.SetMicrophoneVolume(volume));
-
-    return 0;
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::MicrophoneVolume(uint32_t& volume) const
-{
+int32_t AudioDeviceLinuxALSA::StereoRecording(bool& enabled) const {
+  if (_recChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
 
-    uint32_t level(0);
-
-    if (_mixerManager.MicrophoneVolume(level) == -1)
-    {
-        LOG(LS_WARNING) << "failed to retrive current microphone level";
-        return -1;
-    }
-
-    volume = level;
-
-    return 0;
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::MaxMicrophoneVolume(
-    uint32_t& maxVolume) const
-{
+int32_t AudioDeviceLinuxALSA::StereoPlayoutIsAvailable(bool& available) {
+  rtc::CritScope lock(&_critSect);
 
-    uint32_t maxVol(0);
-
-    if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
-    {
-        return -1;
-    }
-
-    maxVolume = maxVol;
-
+  // If we already have initialized in stereo it's obviously available
+  if (_playIsInitialized && (2 == _playChannels)) {
+    available = true;
     return 0;
+  }
+
+  // Save rec states and the number of rec channels
+  bool playIsInitialized = _playIsInitialized;
+  bool playing = _playing;
+  int playChannels = _playChannels;
+
+  available = false;
+
+  // Stop/uninitialize recording if initialized (and possibly started)
+  if (_playIsInitialized) {
+    StopPlayout();
+  }
+
+  // Try init in stereo;
+  _playChannels = 2;
+  if (InitPlayout() == 0) {
+    available = true;
+  }
+
+  // Stop/uninitialize recording
+  StopPlayout();
+
+  // Recover previous states
+  _playChannels = playChannels;
+  if (playIsInitialized) {
+    InitPlayout();
+  }
+  if (playing) {
+    StartPlayout();
+  }
+
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::MinMicrophoneVolume(
-    uint32_t& minVolume) const
-{
+int32_t AudioDeviceLinuxALSA::SetStereoPlayout(bool enable) {
+  if (enable)
+    _playChannels = 2;
+  else
+    _playChannels = 1;
 
-    uint32_t minVol(0);
-
-    if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
-    {
-        return -1;
-    }
-
-    minVolume = minVol;
-
-    return 0;
+  return 0;
 }
 
-int16_t AudioDeviceLinuxALSA::PlayoutDevices()
-{
+int32_t AudioDeviceLinuxALSA::StereoPlayout(bool& enabled) const {
+  if (_playChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
 
-    return (int16_t)GetDevicesInfo(0, true);
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(uint16_t index)
-{
+int32_t AudioDeviceLinuxALSA::SetAGC(bool enable) {
+  _AGC = enable;
 
-    if (_playIsInitialized)
-    {
-        return -1;
-    }
+  return 0;
+}
 
-    uint32_t nDevices = GetDevicesInfo(0, true);
-    LOG(LS_VERBOSE) << "number of available audio output devices is "
-                    << nDevices;
+bool AudioDeviceLinuxALSA::AGC() const {
+  return _AGC;
+}
 
-    if (index > (nDevices-1))
-    {
-        LOG(LS_ERROR) << "device index is out of range [0," << (nDevices-1)
-                      << "]";
-        return -1;
-    }
+int32_t AudioDeviceLinuxALSA::MicrophoneVolumeIsAvailable(bool& available) {
+  bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
 
-    _outputDeviceIndex = index;
-    _outputDeviceIsSpecified = true;
-
+  // Make an attempt to open up the
+  // input mixer corresponding to the currently selected output device.
+  if (!wasInitialized && InitMicrophone() == -1) {
+    // If we end up here it means that the selected microphone has no volume
+    // control.
+    available = false;
     return 0;
+  }
+
+  // Given that InitMicrophone was successful, we know that a volume control
+  // exists
+  available = true;
+
+  // Close the initialized input mixer
+  if (!wasInitialized) {
+    _mixerManager.CloseMicrophone();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetMicrophoneVolume(uint32_t volume) {
+  return (_mixerManager.SetMicrophoneVolume(volume));
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MicrophoneVolume(uint32_t& volume) const {
+  uint32_t level(0);
+
+  if (_mixerManager.MicrophoneVolume(level) == -1) {
+    LOG(LS_WARNING) << "failed to retrive current microphone level";
+    return -1;
+  }
+
+  volume = level;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+  uint32_t maxVol(0);
+
+  if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
+    return -1;
+  }
+
+  maxVolume = maxVol;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MinMicrophoneVolume(uint32_t& minVolume) const {
+  uint32_t minVol(0);
+
+  if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
+    return -1;
+  }
+
+  minVolume = minVol;
+
+  return 0;
+}
+
+int16_t AudioDeviceLinuxALSA::PlayoutDevices() {
+  return (int16_t)GetDevicesInfo(0, true);
+}
+
+int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(uint16_t index) {
+  if (_playIsInitialized) {
+    return -1;
+  }
+
+  uint32_t nDevices = GetDevicesInfo(0, true);
+  LOG(LS_VERBOSE) << "number of available audio output devices is " << nDevices;
+
+  if (index > (nDevices - 1)) {
+    LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                  << "]";
+    return -1;
+  }
+
+  _outputDeviceIndex = index;
+  _outputDeviceIsSpecified = true;
+
+  return 0;
 }
 
 int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(
-    AudioDeviceModule::WindowsDeviceType /*device*/)
-{
-    LOG(LS_ERROR) << "WindowsDeviceType not supported";
-    return -1;
+    AudioDeviceModule::WindowsDeviceType /*device*/) {
+  LOG(LS_ERROR) << "WindowsDeviceType not supported";
+  return -1;
 }
 
 int32_t AudioDeviceLinuxALSA::PlayoutDeviceName(
     uint16_t index,
     char name[kAdmMaxDeviceNameSize],
-    char guid[kAdmMaxGuidSize])
-{
+    char guid[kAdmMaxGuidSize]) {
+  const uint16_t nDevices(PlayoutDevices());
 
-    const uint16_t nDevices(PlayoutDevices());
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
 
-    if ((index > (nDevices-1)) || (name == NULL))
-    {
-        return -1;
-    }
+  memset(name, 0, kAdmMaxDeviceNameSize);
 
-    memset(name, 0, kAdmMaxDeviceNameSize);
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
 
-    if (guid != NULL)
-    {
-        memset(guid, 0, kAdmMaxGuidSize);
-    }
-
-    return GetDevicesInfo(1, true, index, name, kAdmMaxDeviceNameSize);
+  return GetDevicesInfo(1, true, index, name, kAdmMaxDeviceNameSize);
 }
 
 int32_t AudioDeviceLinuxALSA::RecordingDeviceName(
     uint16_t index,
     char name[kAdmMaxDeviceNameSize],
-    char guid[kAdmMaxGuidSize])
-{
+    char guid[kAdmMaxGuidSize]) {
+  const uint16_t nDevices(RecordingDevices());
 
-    const uint16_t nDevices(RecordingDevices());
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
 
-    if ((index > (nDevices-1)) || (name == NULL))
-    {
-        return -1;
-    }
+  memset(name, 0, kAdmMaxDeviceNameSize);
 
-    memset(name, 0, kAdmMaxDeviceNameSize);
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
 
-    if (guid != NULL)
-    {
-        memset(guid, 0, kAdmMaxGuidSize);
-    }
-
-    return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
+  return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
 }
 
-int16_t AudioDeviceLinuxALSA::RecordingDevices()
-{
-
-    return (int16_t)GetDevicesInfo(0, false);
+int16_t AudioDeviceLinuxALSA::RecordingDevices() {
+  return (int16_t)GetDevicesInfo(0, false);
 }
 
-int32_t AudioDeviceLinuxALSA::SetRecordingDevice(uint16_t index)
-{
+int32_t AudioDeviceLinuxALSA::SetRecordingDevice(uint16_t index) {
+  if (_recIsInitialized) {
+    return -1;
+  }
 
-    if (_recIsInitialized)
-    {
-        return -1;
-    }
+  uint32_t nDevices = GetDevicesInfo(0, false);
+  LOG(LS_VERBOSE) << "number of availiable audio input devices is " << nDevices;
 
-    uint32_t nDevices = GetDevicesInfo(0, false);
-    LOG(LS_VERBOSE) << "number of availiable audio input devices is "
-                    << nDevices;
+  if (index > (nDevices - 1)) {
+    LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                  << "]";
+    return -1;
+  }
 
-    if (index > (nDevices-1))
-    {
-        LOG(LS_ERROR) << "device index is out of range [0," << (nDevices-1)
-                      << "]";
-        return -1;
-    }
+  _inputDeviceIndex = index;
+  _inputDeviceIsSpecified = true;
 
-    _inputDeviceIndex = index;
-    _inputDeviceIsSpecified = true;
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -810,1124 +693,966 @@
 // ----------------------------------------------------------------------------
 
 int32_t AudioDeviceLinuxALSA::SetRecordingDevice(
-    AudioDeviceModule::WindowsDeviceType /*device*/)
-{
-    LOG(LS_ERROR) << "WindowsDeviceType not supported";
+    AudioDeviceModule::WindowsDeviceType /*device*/) {
+  LOG(LS_ERROR) << "WindowsDeviceType not supported";
+  return -1;
+}
+
+int32_t AudioDeviceLinuxALSA::PlayoutIsAvailable(bool& available) {
+  available = false;
+
+  // Try to initialize the playout side with mono
+  // Assumes that user set num channels after calling this function
+  _playChannels = 1;
+  int32_t res = InitPlayout();
+
+  // Cancel effect of initialization
+  StopPlayout();
+
+  if (res != -1) {
+    available = true;
+  } else {
+    // It may be possible to play out in stereo
+    res = StereoPlayoutIsAvailable(available);
+    if (available) {
+      // Then set channels to 2 so InitPlayout doesn't fail
+      _playChannels = 2;
+    }
+  }
+
+  return res;
+}
+
+int32_t AudioDeviceLinuxALSA::RecordingIsAvailable(bool& available) {
+  available = false;
+
+  // Try to initialize the recording side with mono
+  // Assumes that user set num channels after calling this function
+  _recChannels = 1;
+  int32_t res = InitRecording();
+
+  // Cancel effect of initialization
+  StopRecording();
+
+  if (res != -1) {
+    available = true;
+  } else {
+    // It may be possible to record in stereo
+    res = StereoRecordingIsAvailable(available);
+    if (available) {
+      // Then set channels to 2 so InitPlayout doesn't fail
+      _recChannels = 2;
+    }
+  }
+
+  return res;
+}
+
+int32_t AudioDeviceLinuxALSA::InitPlayout() {
+  int errVal = 0;
+
+  rtc::CritScope lock(&_critSect);
+  if (_playing) {
     return -1;
-}
+  }
 
-int32_t AudioDeviceLinuxALSA::PlayoutIsAvailable(bool& available)
-{
+  if (!_outputDeviceIsSpecified) {
+    return -1;
+  }
 
-    available = false;
-
-    // Try to initialize the playout side with mono
-    // Assumes that user set num channels after calling this function
-    _playChannels = 1;
-    int32_t res = InitPlayout();
-
-    // Cancel effect of initialization
-    StopPlayout();
-
-    if (res != -1)
-    {
-        available = true;
-    }
-    else
-    {
-        // It may be possible to play out in stereo
-        res = StereoPlayoutIsAvailable(available);
-        if (available)
-        {
-            // Then set channels to 2 so InitPlayout doesn't fail
-            _playChannels = 2;
-        }
-    }
-
-    return res;
-}
-
-int32_t AudioDeviceLinuxALSA::RecordingIsAvailable(bool& available)
-{
-
-    available = false;
-
-    // Try to initialize the recording side with mono
-    // Assumes that user set num channels after calling this function
-    _recChannels = 1;
-    int32_t res = InitRecording();
-
-    // Cancel effect of initialization
-    StopRecording();
-
-    if (res != -1)
-    {
-        available = true;
-    }
-    else
-    {
-        // It may be possible to record in stereo
-        res = StereoRecordingIsAvailable(available);
-        if (available)
-        {
-            // Then set channels to 2 so InitPlayout doesn't fail
-            _recChannels = 2;
-        }
-    }
-
-    return res;
-}
-
-int32_t AudioDeviceLinuxALSA::InitPlayout()
-{
-
-    int errVal = 0;
-
-    rtc::CritScope lock(&_critSect);
-    if (_playing)
-    {
-        return -1;
-    }
-
-    if (!_outputDeviceIsSpecified)
-    {
-        return -1;
-    }
-
-    if (_playIsInitialized)
-    {
-        return 0;
-    }
-    // Initialize the speaker (devices might have been added or removed)
-    if (InitSpeaker() == -1)
-    {
-        LOG(LS_WARNING) << "InitSpeaker() failed";
-    }
-
-    // Start by closing any existing wave-output devices
-    //
-    if (_handlePlayout != NULL)
-    {
-        LATE(snd_pcm_close)(_handlePlayout);
-        _handlePlayout = NULL;
-        _playIsInitialized = false;
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR)
-                << "Error closing current playout sound device, error: "
-                << LATE(snd_strerror)(errVal);
-        }
-    }
-
-    // Open PCM device for playout
-    char deviceName[kAdmMaxDeviceNameSize] = {0};
-    GetDevicesInfo(2, true, _outputDeviceIndex, deviceName,
-                   kAdmMaxDeviceNameSize);
-
-    LOG(LS_VERBOSE) << "InitPlayout open (" << deviceName << ")";
-
-    errVal = LATE(snd_pcm_open)
-                 (&_handlePlayout,
-                  deviceName,
-                  SND_PCM_STREAM_PLAYBACK,
-                  SND_PCM_NONBLOCK);
-
-    if (errVal == -EBUSY) // Device busy - try some more!
-    {
-        for (int i=0; i < 5; i++)
-        {
-            SleepMs(1000);
-            errVal = LATE(snd_pcm_open)
-                         (&_handlePlayout,
-                          deviceName,
-                          SND_PCM_STREAM_PLAYBACK,
-                          SND_PCM_NONBLOCK);
-            if (errVal == 0)
-            {
-                break;
-            }
-        }
-    }
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "unable to open playback device: "
-                      << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
-        _handlePlayout = NULL;
-        return -1;
-    }
-
-    _playoutFramesIn10MS = _playoutFreq/100;
-    if ((errVal = LATE(snd_pcm_set_params)( _handlePlayout,
-#if defined(WEBRTC_ARCH_BIG_ENDIAN)
-        SND_PCM_FORMAT_S16_BE,
-#else
-        SND_PCM_FORMAT_S16_LE, //format
-#endif
-        SND_PCM_ACCESS_RW_INTERLEAVED, //access
-        _playChannels, //channels
-        _playoutFreq, //rate
-        1, //soft_resample
-        ALSA_PLAYOUT_LATENCY //40*1000 //latency required overall latency in us
-    )) < 0)
-    {   /* 0.5sec */
-        _playoutFramesIn10MS = 0;
-        LOG(LS_ERROR) << "unable to set playback device: "
-                      << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
-        ErrorRecovery(errVal, _handlePlayout);
-        errVal = LATE(snd_pcm_close)(_handlePlayout);
-        _handlePlayout = NULL;
-        return -1;
-    }
-
-    errVal = LATE(snd_pcm_get_params)(_handlePlayout,
-        &_playoutBufferSizeInFrame, &_playoutPeriodSizeInFrame);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_pcm_get_params: " << LATE(snd_strerror)(errVal)
-                      << " (" << errVal << ")";
-        _playoutBufferSizeInFrame = 0;
-        _playoutPeriodSizeInFrame = 0;
-    }
-    else {
-        LOG(LS_VERBOSE) << "playout snd_pcm_get_params buffer_size:"
-                        << _playoutBufferSizeInFrame << " period_size :"
-                        << _playoutPeriodSizeInFrame;
-    }
-
-    if (_ptrAudioBuffer)
-    {
-        // Update webrtc audio buffer with the selected parameters
-        _ptrAudioBuffer->SetPlayoutSampleRate(_playoutFreq);
-        _ptrAudioBuffer->SetPlayoutChannels(_playChannels);
-    }
-
-    // Set play buffer size
-    _playoutBufferSizeIn10MS = LATE(snd_pcm_frames_to_bytes)(
-        _handlePlayout, _playoutFramesIn10MS);
-
-    // Init varaibles used for play
-
-    if (_handlePlayout != NULL)
-    {
-        _playIsInitialized = true;
-        return 0;
-    }
-    else
-    {
-        return -1;
-    }
-
+  if (_playIsInitialized) {
     return 0;
-}
+  }
+  // Initialize the speaker (devices might have been added or removed)
+  if (InitSpeaker() == -1) {
+    LOG(LS_WARNING) << "InitSpeaker() failed";
+  }
 
-int32_t AudioDeviceLinuxALSA::InitRecording()
-{
-
-    int errVal = 0;
-
-    rtc::CritScope lock(&_critSect);
-
-    if (_recording)
-    {
-        return -1;
+  // Start by closing any existing wave-output devices
+  //
+  if (_handlePlayout != NULL) {
+    LATE(snd_pcm_close)(_handlePlayout);
+    _handlePlayout = NULL;
+    _playIsInitialized = false;
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error closing current playout sound device, error: "
+                    << LATE(snd_strerror)(errVal);
     }
+  }
 
-    if (!_inputDeviceIsSpecified)
-    {
-        return -1;
-    }
+  // Open PCM device for playout
+  char deviceName[kAdmMaxDeviceNameSize] = {0};
+  GetDevicesInfo(2, true, _outputDeviceIndex, deviceName,
+                 kAdmMaxDeviceNameSize);
 
-    if (_recIsInitialized)
-    {
-        return 0;
-    }
+  LOG(LS_VERBOSE) << "InitPlayout open (" << deviceName << ")";
 
-    // Initialize the microphone (devices might have been added or removed)
-    if (InitMicrophone() == -1)
-    {
-        LOG(LS_WARNING) << "InitMicrophone() failed";
-    }
+  errVal = LATE(snd_pcm_open)(&_handlePlayout, deviceName,
+                              SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK);
 
-    // Start by closing any existing pcm-input devices
-    //
-    if (_handleRecord != NULL)
-    {
-        int errVal = LATE(snd_pcm_close)(_handleRecord);
-        _handleRecord = NULL;
-        _recIsInitialized = false;
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR)
-                << "Error closing current recording sound device, error: "
-                << LATE(snd_strerror)(errVal);
-        }
-    }
-
-    // Open PCM device for recording
-    // The corresponding settings for playout are made after the record settings
-    char deviceName[kAdmMaxDeviceNameSize] = {0};
-    GetDevicesInfo(2, false, _inputDeviceIndex, deviceName,
-                   kAdmMaxDeviceNameSize);
-
-    LOG(LS_VERBOSE) << "InitRecording open (" << deviceName << ")";
-    errVal = LATE(snd_pcm_open)
-                 (&_handleRecord,
-                  deviceName,
-                  SND_PCM_STREAM_CAPTURE,
-                  SND_PCM_NONBLOCK);
-
-    // Available modes: 0 = blocking, SND_PCM_NONBLOCK, SND_PCM_ASYNC
-    if (errVal == -EBUSY) // Device busy - try some more!
-    {
-        for (int i=0; i < 5; i++)
-        {
-            SleepMs(1000);
-            errVal = LATE(snd_pcm_open)
-                         (&_handleRecord,
-                          deviceName,
-                          SND_PCM_STREAM_CAPTURE,
-                          SND_PCM_NONBLOCK);
-            if (errVal == 0)
-            {
-                break;
-            }
-        }
-    }
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "unable to open record device: "
-                      << LATE(snd_strerror)(errVal);
-        _handleRecord = NULL;
-        return -1;
-    }
-
-    _recordingFramesIn10MS = _recordingFreq/100;
-    if ((errVal = LATE(snd_pcm_set_params)(_handleRecord,
-#if defined(WEBRTC_ARCH_BIG_ENDIAN)
-        SND_PCM_FORMAT_S16_BE, //format
-#else
-        SND_PCM_FORMAT_S16_LE, //format
-#endif
-        SND_PCM_ACCESS_RW_INTERLEAVED, //access
-        _recChannels, //channels
-        _recordingFreq, //rate
-        1, //soft_resample
-        ALSA_CAPTURE_LATENCY //latency in us
-    )) < 0)
-    {
-         // Fall back to another mode then.
-         if (_recChannels == 1)
-           _recChannels = 2;
-         else
-           _recChannels = 1;
-
-         if ((errVal = LATE(snd_pcm_set_params)(_handleRecord,
-#if defined(WEBRTC_ARCH_BIG_ENDIAN)
-             SND_PCM_FORMAT_S16_BE, //format
-#else
-             SND_PCM_FORMAT_S16_LE, //format
-#endif
-             SND_PCM_ACCESS_RW_INTERLEAVED, //access
-             _recChannels, //channels
-             _recordingFreq, //rate
-             1, //soft_resample
-             ALSA_CAPTURE_LATENCY //latency in us
-         )) < 0)
-         {
-             _recordingFramesIn10MS = 0;
-             LOG(LS_ERROR) << "unable to set record settings: "
-                           << LATE(snd_strerror)(errVal) << " (" << errVal
-                           << ")";
-             ErrorRecovery(errVal, _handleRecord);
-             errVal = LATE(snd_pcm_close)(_handleRecord);
-             _handleRecord = NULL;
-             return -1;
-         }
-    }
-
-    errVal = LATE(snd_pcm_get_params)(_handleRecord,
-        &_recordingBuffersizeInFrame, &_recordingPeriodSizeInFrame);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_pcm_get_params " << LATE(snd_strerror)(errVal)
-                      << " (" << errVal << ")";
-        _recordingBuffersizeInFrame = 0;
-        _recordingPeriodSizeInFrame = 0;
-    }
-    else {
-        LOG(LS_VERBOSE) << "capture snd_pcm_get_params, buffer_size:"
-                        << _recordingBuffersizeInFrame << ", period_size:"
-                        << _recordingPeriodSizeInFrame;
-    }
-
-    if (_ptrAudioBuffer)
-    {
-        // Update webrtc audio buffer with the selected parameters
-        _ptrAudioBuffer->SetRecordingSampleRate(_recordingFreq);
-        _ptrAudioBuffer->SetRecordingChannels(_recChannels);
-    }
-
-    // Set rec buffer size and create buffer
-    _recordingBufferSizeIn10MS = LATE(snd_pcm_frames_to_bytes)(
-        _handleRecord, _recordingFramesIn10MS);
-
-    if (_handleRecord != NULL)
-    {
-        // Mark recording side as initialized
-        _recIsInitialized = true;
-        return 0;
-    }
-    else
-    {
-        return -1;
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::StartRecording()
-{
-
-    if (!_recIsInitialized)
-    {
-        return -1;
-    }
-
-    if (_recording)
-    {
-        return 0;
-    }
-
-    _recording = true;
-
-    int errVal = 0;
-    _recordingFramesLeft = _recordingFramesIn10MS;
-
-    // Make sure we only create the buffer once.
-    if (!_recordingBuffer)
-        _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
-    if (!_recordingBuffer)
-    {
-        LOG(LS_ERROR) << "failed to alloc recording buffer";
-        _recording = false;
-        return -1;
-    }
-    // RECORDING
-    _ptrThreadRec.reset(new rtc::PlatformThread(
-        RecThreadFunc, this, "webrtc_audio_module_capture_thread"));
-
-    _ptrThreadRec->Start();
-    _ptrThreadRec->SetPriority(rtc::kRealtimePriority);
-
-    errVal = LATE(snd_pcm_prepare)(_handleRecord);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "capture snd_pcm_prepare failed ("
-                      << LATE(snd_strerror)(errVal) << ")\n";
-        // just log error
-        // if snd_pcm_open fails will return -1
-    }
-
-    errVal = LATE(snd_pcm_start)(_handleRecord);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "capture snd_pcm_start err: "
-                      << LATE(snd_strerror)(errVal);
-        errVal = LATE(snd_pcm_start)(_handleRecord);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "capture snd_pcm_start 2nd try err: "
-                          << LATE(snd_strerror)(errVal);
-            StopRecording();
-            return -1;
-        }
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceLinuxALSA::StopRecording()
-{
-
-    {
-      rtc::CritScope lock(&_critSect);
-
-      if (!_recIsInitialized)
-      {
-          return 0;
+  if (errVal == -EBUSY)  // Device busy - try some more!
+  {
+    for (int i = 0; i < 5; i++) {
+      SleepMs(1000);
+      errVal = LATE(snd_pcm_open)(&_handlePlayout, deviceName,
+                                  SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK);
+      if (errVal == 0) {
+        break;
       }
-
-      if (_handleRecord == NULL)
-      {
-          return -1;
-      }
-
-      // Make sure we don't start recording (it's asynchronous).
-      _recIsInitialized = false;
-      _recording = false;
     }
+  }
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "unable to open playback device: "
+                  << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+    _handlePlayout = NULL;
+    return -1;
+  }
 
-    if (_ptrThreadRec)
-    {
-        _ptrThreadRec->Stop();
-        _ptrThreadRec.reset();
-    }
+  _playoutFramesIn10MS = _playoutFreq / 100;
+  if ((errVal = LATE(snd_pcm_set_params)(
+           _handlePlayout,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+           SND_PCM_FORMAT_S16_BE,
+#else
+           SND_PCM_FORMAT_S16_LE,                             // format
+#endif
+           SND_PCM_ACCESS_RW_INTERLEAVED,  // access
+           _playChannels,                  // channels
+           _playoutFreq,                   // rate
+           1,                              // soft_resample
+           ALSA_PLAYOUT_LATENCY  // 40*1000 //latency required overall latency
+                                 // in us
+           )) < 0) {             /* 0.5sec */
+    _playoutFramesIn10MS = 0;
+    LOG(LS_ERROR) << "unable to set playback device: "
+                  << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+    ErrorRecovery(errVal, _handlePlayout);
+    errVal = LATE(snd_pcm_close)(_handlePlayout);
+    _handlePlayout = NULL;
+    return -1;
+  }
 
-    rtc::CritScope lock(&_critSect);
-    _recordingFramesLeft = 0;
-    if (_recordingBuffer)
-    {
-        delete [] _recordingBuffer;
-        _recordingBuffer = NULL;
-    }
+  errVal = LATE(snd_pcm_get_params)(_handlePlayout, &_playoutBufferSizeInFrame,
+                                    &_playoutPeriodSizeInFrame);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_pcm_get_params: " << LATE(snd_strerror)(errVal)
+                  << " (" << errVal << ")";
+    _playoutBufferSizeInFrame = 0;
+    _playoutPeriodSizeInFrame = 0;
+  } else {
+    LOG(LS_VERBOSE) << "playout snd_pcm_get_params buffer_size:"
+                    << _playoutBufferSizeInFrame
+                    << " period_size :" << _playoutPeriodSizeInFrame;
+  }
 
-    // Stop and close pcm recording device.
-    int errVal = LATE(snd_pcm_drop)(_handleRecord);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error stop recording: " << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  if (_ptrAudioBuffer) {
+    // Update webrtc audio buffer with the selected parameters
+    _ptrAudioBuffer->SetPlayoutSampleRate(_playoutFreq);
+    _ptrAudioBuffer->SetPlayoutChannels(_playChannels);
+  }
 
-    errVal = LATE(snd_pcm_close)(_handleRecord);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error closing record sound device, error: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  // Set play buffer size
+  _playoutBufferSizeIn10MS =
+      LATE(snd_pcm_frames_to_bytes)(_handlePlayout, _playoutFramesIn10MS);
 
-    // Check if we have muted and unmute if so.
-    bool muteEnabled = false;
-    MicrophoneMute(muteEnabled);
-    if (muteEnabled)
-    {
-        SetMicrophoneMute(false);
-    }
+  // Init variables used for play
 
-    // set the pcm input handle to NULL
+  if (_handlePlayout != NULL) {
+    _playIsInitialized = true;
+    return 0;
+  } else {
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::InitRecording() {
+  int errVal = 0;
+
+  rtc::CritScope lock(&_critSect);
+
+  if (_recording) {
+    return -1;
+  }
+
+  if (!_inputDeviceIsSpecified) {
+    return -1;
+  }
+
+  if (_recIsInitialized) {
+    return 0;
+  }
+
+  // Initialize the microphone (devices might have been added or removed)
+  if (InitMicrophone() == -1) {
+    LOG(LS_WARNING) << "InitMicrophone() failed";
+  }
+
+  // Start by closing any existing pcm-input devices
+  //
+  if (_handleRecord != NULL) {
+    int errVal = LATE(snd_pcm_close)(_handleRecord);
     _handleRecord = NULL;
+    _recIsInitialized = false;
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error closing current recording sound device, error: "
+                    << LATE(snd_strerror)(errVal);
+    }
+  }
+
+  // Open PCM device for recording
+  // The corresponding settings for playout are made after the record settings
+  char deviceName[kAdmMaxDeviceNameSize] = {0};
+  GetDevicesInfo(2, false, _inputDeviceIndex, deviceName,
+                 kAdmMaxDeviceNameSize);
+
+  LOG(LS_VERBOSE) << "InitRecording open (" << deviceName << ")";
+  errVal = LATE(snd_pcm_open)(&_handleRecord, deviceName,
+                              SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK);
+
+  // Available modes: 0 = blocking, SND_PCM_NONBLOCK, SND_PCM_ASYNC
+  if (errVal == -EBUSY)  // Device busy - try some more!
+  {
+    for (int i = 0; i < 5; i++) {
+      SleepMs(1000);
+      errVal = LATE(snd_pcm_open)(&_handleRecord, deviceName,
+                                  SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK);
+      if (errVal == 0) {
+        break;
+      }
+    }
+  }
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "unable to open record device: "
+                  << LATE(snd_strerror)(errVal);
+    _handleRecord = NULL;
+    return -1;
+  }
+
+  _recordingFramesIn10MS = _recordingFreq / 100;
+  if ((errVal =
+           LATE(snd_pcm_set_params)(_handleRecord,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+                                    SND_PCM_FORMAT_S16_BE,  // format
+#else
+                                    SND_PCM_FORMAT_S16_LE,    // format
+#endif
+                                    SND_PCM_ACCESS_RW_INTERLEAVED,  // access
+                                    _recChannels,                   // channels
+                                    _recordingFreq,                 // rate
+                                    1,                    // soft_resample
+                                    ALSA_CAPTURE_LATENCY  // latency in us
+                                    )) < 0) {
+    // Fall back to another mode then.
+    if (_recChannels == 1)
+      _recChannels = 2;
+    else
+      _recChannels = 1;
+
+    if ((errVal =
+             LATE(snd_pcm_set_params)(_handleRecord,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+                                      SND_PCM_FORMAT_S16_BE,  // format
+#else
+                                      SND_PCM_FORMAT_S16_LE,  // format
+#endif
+                                      SND_PCM_ACCESS_RW_INTERLEAVED,  // access
+                                      _recChannels,         // channels
+                                      _recordingFreq,       // rate
+                                      1,                    // soft_resample
+                                      ALSA_CAPTURE_LATENCY  // latency in us
+                                      )) < 0) {
+      _recordingFramesIn10MS = 0;
+      LOG(LS_ERROR) << "unable to set record settings: "
+                    << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+      ErrorRecovery(errVal, _handleRecord);
+      errVal = LATE(snd_pcm_close)(_handleRecord);
+      _handleRecord = NULL;
+      return -1;
+    }
+  }
+
+  errVal = LATE(snd_pcm_get_params)(_handleRecord, &_recordingBuffersizeInFrame,
+                                    &_recordingPeriodSizeInFrame);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_pcm_get_params " << LATE(snd_strerror)(errVal) << " ("
+                  << errVal << ")";
+    _recordingBuffersizeInFrame = 0;
+    _recordingPeriodSizeInFrame = 0;
+  } else {
+    LOG(LS_VERBOSE) << "capture snd_pcm_get_params, buffer_size:"
+                    << _recordingBuffersizeInFrame
+                    << ", period_size:" << _recordingPeriodSizeInFrame;
+  }
+
+  if (_ptrAudioBuffer) {
+    // Update webrtc audio buffer with the selected parameters
+    _ptrAudioBuffer->SetRecordingSampleRate(_recordingFreq);
+    _ptrAudioBuffer->SetRecordingChannels(_recChannels);
+  }
+
+  // Set rec buffer size and create buffer
+  _recordingBufferSizeIn10MS =
+      LATE(snd_pcm_frames_to_bytes)(_handleRecord, _recordingFramesIn10MS);
+
+  if (_handleRecord != NULL) {
+    // Mark recording side as initialized
+    _recIsInitialized = true;
     return 0;
+  } else {
+    return -1;
+  }
+
+  return 0;
 }
 
-bool AudioDeviceLinuxALSA::RecordingIsInitialized() const
-{
-    return (_recIsInitialized);
+int32_t AudioDeviceLinuxALSA::StartRecording() {
+  if (!_recIsInitialized) {
+    return -1;
+  }
+
+  if (_recording) {
+    return 0;
+  }
+
+  _recording = true;
+
+  int errVal = 0;
+  _recordingFramesLeft = _recordingFramesIn10MS;
+
+  // Make sure we only create the buffer once.
+  if (!_recordingBuffer)
+    _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
+  if (!_recordingBuffer) {
+    LOG(LS_ERROR) << "failed to alloc recording buffer";
+    _recording = false;
+    return -1;
+  }
+  // RECORDING
+  _ptrThreadRec.reset(new rtc::PlatformThread(
+      RecThreadFunc, this, "webrtc_audio_module_capture_thread"));
+
+  _ptrThreadRec->Start();
+  _ptrThreadRec->SetPriority(rtc::kRealtimePriority);
+
+  errVal = LATE(snd_pcm_prepare)(_handleRecord);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "capture snd_pcm_prepare failed ("
+                  << LATE(snd_strerror)(errVal) << ")\n";
+    // just log error
+    // if snd_pcm_open fails will return -1
+  }
+
+  errVal = LATE(snd_pcm_start)(_handleRecord);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "capture snd_pcm_start err: "
+                  << LATE(snd_strerror)(errVal);
+    errVal = LATE(snd_pcm_start)(_handleRecord);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "capture snd_pcm_start 2nd try err: "
+                    << LATE(snd_strerror)(errVal);
+      StopRecording();
+      return -1;
+    }
+  }
+
+  return 0;
 }
 
-bool AudioDeviceLinuxALSA::Recording() const
-{
-    return (_recording);
-}
+int32_t AudioDeviceLinuxALSA::StopRecording() {
+  {
+    rtc::CritScope lock(&_critSect);
 
-bool AudioDeviceLinuxALSA::PlayoutIsInitialized() const
-{
-    return (_playIsInitialized);
-}
-
-int32_t AudioDeviceLinuxALSA::StartPlayout()
-{
-    if (!_playIsInitialized)
-    {
-        return -1;
+    if (!_recIsInitialized) {
+      return 0;
     }
 
-    if (_playing)
-    {
-        return 0;
-    }
-
-    _playing = true;
-
-    _playoutFramesLeft = 0;
-    if (!_playoutBuffer)
-        _playoutBuffer = new int8_t[_playoutBufferSizeIn10MS];
-    if (!_playoutBuffer)
-    {
-      LOG(LS_ERROR) << "failed to alloc playout buf";
-      _playing = false;
+    if (_handleRecord == NULL) {
       return -1;
     }
 
-    // PLAYOUT
-    _ptrThreadPlay.reset(new rtc::PlatformThread(
-        PlayThreadFunc, this, "webrtc_audio_module_play_thread"));
-    _ptrThreadPlay->Start();
-    _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);
+    // Make sure we don't start recording (it's asynchronous).
+    _recIsInitialized = false;
+    _recording = false;
+  }
 
-    int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "playout snd_pcm_prepare failed ("
-                      << LATE(snd_strerror)(errVal) << ")\n";
-        // just log error
-        // if snd_pcm_open fails will return -1
-    }
+  if (_ptrThreadRec) {
+    _ptrThreadRec->Stop();
+    _ptrThreadRec.reset();
+  }
 
-    return 0;
+  rtc::CritScope lock(&_critSect);
+  _recordingFramesLeft = 0;
+  if (_recordingBuffer) {
+    delete[] _recordingBuffer;
+    _recordingBuffer = NULL;
+  }
+
+  // Stop and close pcm recording device.
+  int errVal = LATE(snd_pcm_drop)(_handleRecord);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error stop recording: " << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+
+  errVal = LATE(snd_pcm_close)(_handleRecord);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error closing record sound device, error: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+
+  // Check if we have muted and unmute if so.
+  bool muteEnabled = false;
+  MicrophoneMute(muteEnabled);
+  if (muteEnabled) {
+    SetMicrophoneMute(false);
+  }
+
+  // set the pcm input handle to NULL
+  _handleRecord = NULL;
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::StopPlayout()
-{
+bool AudioDeviceLinuxALSA::RecordingIsInitialized() const {
+  return (_recIsInitialized);
+}
 
-    {
-        rtc::CritScope lock(&_critSect);
+bool AudioDeviceLinuxALSA::Recording() const {
+  return (_recording);
+}
 
-        if (!_playIsInitialized)
-        {
-            return 0;
-        }
+bool AudioDeviceLinuxALSA::PlayoutIsInitialized() const {
+  return (_playIsInitialized);
+}
 
-        if (_handlePlayout == NULL)
-        {
-            return -1;
-        }
+int32_t AudioDeviceLinuxALSA::StartPlayout() {
+  if (!_playIsInitialized) {
+    return -1;
+  }
 
-        _playing = false;
-    }
+  if (_playing) {
+    return 0;
+  }
 
-    // stop playout thread first
-    if (_ptrThreadPlay)
-    {
-        _ptrThreadPlay->Stop();
-        _ptrThreadPlay.reset();
-    }
+  _playing = true;
 
+  _playoutFramesLeft = 0;
+  if (!_playoutBuffer)
+    _playoutBuffer = new int8_t[_playoutBufferSizeIn10MS];
+  if (!_playoutBuffer) {
+    LOG(LS_ERROR) << "failed to alloc playout buf";
+    _playing = false;
+    return -1;
+  }
+
+  // PLAYOUT
+  _ptrThreadPlay.reset(new rtc::PlatformThread(
+      PlayThreadFunc, this, "webrtc_audio_module_play_thread"));
+  _ptrThreadPlay->Start();
+  _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);
+
+  int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "playout snd_pcm_prepare failed ("
+                  << LATE(snd_strerror)(errVal) << ")\n";
+    // just log error
+    // if snd_pcm_open fails will return -1
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StopPlayout() {
+  {
     rtc::CritScope lock(&_critSect);
 
-    _playoutFramesLeft = 0;
-    delete [] _playoutBuffer;
-    _playoutBuffer = NULL;
-
-    // stop and close pcm playout device
-    int errVal = LATE(snd_pcm_drop)(_handlePlayout);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error stop playing: " << LATE(snd_strerror)(errVal);
+    if (!_playIsInitialized) {
+      return 0;
     }
 
-    errVal = LATE(snd_pcm_close)(_handlePlayout);
-     if (errVal < 0)
-         LOG(LS_ERROR) << "Error closing playout sound device, error: "
-                       << LATE(snd_strerror)(errVal);
+    if (_handlePlayout == NULL) {
+      return -1;
+    }
 
-     // set the pcm input handle to NULL
-     _playIsInitialized = false;
-     _handlePlayout = NULL;
-     LOG(LS_VERBOSE) << "handle_playout is now set to NULL";
+    _playing = false;
+  }
 
-     return 0;
+  // stop playout thread first
+  if (_ptrThreadPlay) {
+    _ptrThreadPlay->Stop();
+    _ptrThreadPlay.reset();
+  }
+
+  rtc::CritScope lock(&_critSect);
+
+  _playoutFramesLeft = 0;
+  delete[] _playoutBuffer;
+  _playoutBuffer = NULL;
+
+  // stop and close pcm playout device
+  int errVal = LATE(snd_pcm_drop)(_handlePlayout);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error stop playing: " << LATE(snd_strerror)(errVal);
+  }
+
+  errVal = LATE(snd_pcm_close)(_handlePlayout);
+  if (errVal < 0)
+    LOG(LS_ERROR) << "Error closing playout sound device, error: "
+                  << LATE(snd_strerror)(errVal);
+
+  // set the pcm input handle to NULL
+  _playIsInitialized = false;
+  _handlePlayout = NULL;
+  LOG(LS_VERBOSE) << "handle_playout is now set to NULL";
+
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::PlayoutDelay(uint16_t& delayMS) const
-{
-    delayMS = (uint16_t)_playoutDelay * 1000 / _playoutFreq;
-    return 0;
+int32_t AudioDeviceLinuxALSA::PlayoutDelay(uint16_t& delayMS) const {
+  delayMS = (uint16_t)_playoutDelay * 1000 / _playoutFreq;
+  return 0;
 }
 
-bool AudioDeviceLinuxALSA::Playing() const
-{
-    return (_playing);
+bool AudioDeviceLinuxALSA::Playing() const {
+  return (_playing);
 }
 
 // ============================================================================
 //                                 Private Methods
 // ============================================================================
 
-int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
-    const int32_t function,
-    const bool playback,
-    const int32_t enumDeviceNo,
-    char* enumDeviceName,
-    const int32_t ednLen) const
-{
+int32_t AudioDeviceLinuxALSA::GetDevicesInfo(const int32_t function,
+                                             const bool playback,
+                                             const int32_t enumDeviceNo,
+                                             char* enumDeviceName,
+                                             const int32_t ednLen) const {
+  // Device enumeration based on libjingle implementation
+  // by Tristan Schmelcher at Google Inc.
 
-    // Device enumeration based on libjingle implementation
-    // by Tristan Schmelcher at Google Inc.
+  const char* type = playback ? "Output" : "Input";
+  // dmix and dsnoop are only for playback and capture, respectively, but ALSA
+  // stupidly includes them in both lists.
+  const char* ignorePrefix = playback ? "dsnoop:" : "dmix:";
+  // (ALSA lists many more "devices" of questionable interest, but we show them
+  // just in case the weird devices may actually be desirable for some
+  // users/systems.)
 
-    const char *type = playback ? "Output" : "Input";
-    // dmix and dsnoop are only for playback and capture, respectively, but ALSA
-    // stupidly includes them in both lists.
-    const char *ignorePrefix = playback ? "dsnoop:" : "dmix:" ;
-    // (ALSA lists many more "devices" of questionable interest, but we show them
-    // just in case the weird devices may actually be desirable for some
-    // users/systems.)
+  int err;
+  int enumCount(0);
+  bool keepSearching(true);
 
-    int err;
-    int enumCount(0);
-    bool keepSearching(true);
-
-    // From Chromium issue 95797
-    // Loop through the sound cards to get Alsa device hints.
-    // Don't use snd_device_name_hint(-1,..) since there is a access violation
-    // inside this ALSA API with libasound.so.2.0.0.
-    int card = -1;
-    while (!(LATE(snd_card_next)(&card)) && (card >= 0) && keepSearching) {
-        void **hints;
-        err = LATE(snd_device_name_hint)(card, "pcm", &hints);
-        if (err != 0)
-        {
-            LOG(LS_ERROR) << "GetDevicesInfo - device name hint error: "
-                          << LATE(snd_strerror)(err);
-            return -1;
-        }
-
-        enumCount++; // default is 0
-        if ((function == FUNC_GET_DEVICE_NAME ||
-            function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) && enumDeviceNo == 0)
-        {
-            strcpy(enumDeviceName, "default");
-
-            err = LATE(snd_device_name_free_hint)(hints);
-            if (err != 0)
-            {
-                LOG(LS_ERROR)
-                    << "GetDevicesInfo - device name free hint error: "
+  // From Chromium issue 95797
+  // Loop through the sound cards to get Alsa device hints.
+  // Don't use snd_device_name_hint(-1,..) since there is an access violation
+  // inside this ALSA API with libasound.so.2.0.0.
+  int card = -1;
+  while (!(LATE(snd_card_next)(&card)) && (card >= 0) && keepSearching) {
+    void** hints;
+    err = LATE(snd_device_name_hint)(card, "pcm", &hints);
+    if (err != 0) {
+      LOG(LS_ERROR) << "GetDevicesInfo - device name hint error: "
                     << LATE(snd_strerror)(err);
-            }
-
-            return 0;
-        }
-
-        for (void **list = hints; *list != NULL; ++list)
-        {
-            char *actualType = LATE(snd_device_name_get_hint)(*list, "IOID");
-            if (actualType)
-            {   // NULL means it's both.
-                bool wrongType = (strcmp(actualType, type) != 0);
-                free(actualType);
-                if (wrongType)
-                {
-                    // Wrong type of device (i.e., input vs. output).
-                    continue;
-                }
-            }
-
-            char *name = LATE(snd_device_name_get_hint)(*list, "NAME");
-            if (!name)
-            {
-                LOG(LS_ERROR) << "Device has no name";
-                // Skip it.
-                continue;
-            }
-
-            // Now check if we actually want to show this device.
-            if (strcmp(name, "default") != 0 &&
-                strcmp(name, "null") != 0 &&
-                strcmp(name, "pulse") != 0 &&
-                strncmp(name, ignorePrefix, strlen(ignorePrefix)) != 0)
-            {
-                // Yes, we do.
-                char *desc = LATE(snd_device_name_get_hint)(*list, "DESC");
-                if (!desc)
-                {
-                    // Virtual devices don't necessarily have descriptions.
-                    // Use their names instead.
-                    desc = name;
-                }
-
-                if (FUNC_GET_NUM_OF_DEVICE == function)
-                {
-                    LOG(LS_VERBOSE) << "Enum device " << enumCount << " - "
-                                    << name;
-
-                }
-                if ((FUNC_GET_DEVICE_NAME == function) &&
-                    (enumDeviceNo == enumCount))
-                {
-                    // We have found the enum device, copy the name to buffer.
-                    strncpy(enumDeviceName, desc, ednLen);
-                    enumDeviceName[ednLen-1] = '\0';
-                    keepSearching = false;
-                    // Replace '\n' with '-'.
-                    char * pret = strchr(enumDeviceName, '\n'/*0xa*/); //LF
-                    if (pret)
-                        *pret = '-';
-                }
-                if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
-                    (enumDeviceNo == enumCount))
-                {
-                    // We have found the enum device, copy the name to buffer.
-                    strncpy(enumDeviceName, name, ednLen);
-                    enumDeviceName[ednLen-1] = '\0';
-                    keepSearching = false;
-                }
-
-                if (keepSearching)
-                    ++enumCount;
-
-                if (desc != name)
-                    free(desc);
-            }
-
-            free(name);
-
-            if (!keepSearching)
-                break;
-        }
-
-        err = LATE(snd_device_name_free_hint)(hints);
-        if (err != 0)
-        {
-            LOG(LS_ERROR) << "GetDevicesInfo - device name free hint error: "
-                          << LATE(snd_strerror)(err);
-            // Continue and return true anyway, since we did get the whole list.
-        }
+      return -1;
     }
 
-    if (FUNC_GET_NUM_OF_DEVICE == function)
-    {
-        if (enumCount == 1) // only default?
-            enumCount = 0;
-        return enumCount; // Normal return point for function 0
+    enumCount++;  // default is 0
+    if ((function == FUNC_GET_DEVICE_NAME ||
+         function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) &&
+        enumDeviceNo == 0) {
+      strcpy(enumDeviceName, "default");
+
+      err = LATE(snd_device_name_free_hint)(hints);
+      if (err != 0) {
+        LOG(LS_ERROR) << "GetDevicesInfo - device name free hint error: "
+                      << LATE(snd_strerror)(err);
+      }
+
+      return 0;
     }
 
-    if (keepSearching)
-    {
-        // If we get here for function 1 and 2, we didn't find the specified
-        // enum device.
-        LOG(LS_ERROR)
-            << "GetDevicesInfo - Could not find device name or numbers";
-        return -1;
+    for (void** list = hints; *list != NULL; ++list) {
+      char* actualType = LATE(snd_device_name_get_hint)(*list, "IOID");
+      if (actualType) {  // NULL means it's both.
+        bool wrongType = (strcmp(actualType, type) != 0);
+        free(actualType);
+        if (wrongType) {
+          // Wrong type of device (i.e., input vs. output).
+          continue;
+        }
+      }
+
+      char* name = LATE(snd_device_name_get_hint)(*list, "NAME");
+      if (!name) {
+        LOG(LS_ERROR) << "Device has no name";
+        // Skip it.
+        continue;
+      }
+
+      // Now check if we actually want to show this device.
+      if (strcmp(name, "default") != 0 && strcmp(name, "null") != 0 &&
+          strcmp(name, "pulse") != 0 &&
+          strncmp(name, ignorePrefix, strlen(ignorePrefix)) != 0) {
+        // Yes, we do.
+        char* desc = LATE(snd_device_name_get_hint)(*list, "DESC");
+        if (!desc) {
+          // Virtual devices don't necessarily have descriptions.
+          // Use their names instead.
+          desc = name;
+        }
+
+        if (FUNC_GET_NUM_OF_DEVICE == function) {
+          LOG(LS_VERBOSE) << "Enum device " << enumCount << " - " << name;
+        }
+        if ((FUNC_GET_DEVICE_NAME == function) && (enumDeviceNo == enumCount)) {
+          // We have found the enum device, copy the name to buffer.
+          strncpy(enumDeviceName, desc, ednLen);
+          enumDeviceName[ednLen - 1] = '\0';
+          keepSearching = false;
+          // Replace '\n' with '-'.
+          char* pret = strchr(enumDeviceName, '\n' /*0xa*/);  // LF
+          if (pret)
+            *pret = '-';
+        }
+        if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
+            (enumDeviceNo == enumCount)) {
+          // We have found the enum device, copy the name to buffer.
+          strncpy(enumDeviceName, name, ednLen);
+          enumDeviceName[ednLen - 1] = '\0';
+          keepSearching = false;
+        }
+
+        if (keepSearching)
+          ++enumCount;
+
+        if (desc != name)
+          free(desc);
+      }
+
+      free(name);
+
+      if (!keepSearching)
+        break;
     }
 
-    return 0;
+    err = LATE(snd_device_name_free_hint)(hints);
+    if (err != 0) {
+      LOG(LS_ERROR) << "GetDevicesInfo - device name free hint error: "
+                    << LATE(snd_strerror)(err);
+      // Continue and return true anyway, since we did get the whole list.
+    }
+  }
+
+  if (FUNC_GET_NUM_OF_DEVICE == function) {
+    if (enumCount == 1)  // only default?
+      enumCount = 0;
+    return enumCount;  // Normal return point for function 0
+  }
+
+  if (keepSearching) {
+    // If we get here for function 1 and 2, we didn't find the specified
+    // enum device.
+    LOG(LS_ERROR) << "GetDevicesInfo - Could not find device name or numbers";
+    return -1;
+  }
+
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::InputSanityCheckAfterUnlockedPeriod() const
-{
-    if (_handleRecord == NULL)
-    {
-        LOG(LS_ERROR) << "input state has been modified during unlocked period";
-        return -1;
-    }
-    return 0;
+int32_t AudioDeviceLinuxALSA::InputSanityCheckAfterUnlockedPeriod() const {
+  if (_handleRecord == NULL) {
+    LOG(LS_ERROR) << "input state has been modified during unlocked period";
+    return -1;
+  }
+  return 0;
 }
 
-int32_t AudioDeviceLinuxALSA::OutputSanityCheckAfterUnlockedPeriod() const
-{
-    if (_handlePlayout == NULL)
-    {
-        LOG(LS_ERROR)
-            << "output state has been modified during unlocked period";
-        return -1;
-    }
-    return 0;
+int32_t AudioDeviceLinuxALSA::OutputSanityCheckAfterUnlockedPeriod() const {
+  if (_handlePlayout == NULL) {
+    LOG(LS_ERROR) << "output state has been modified during unlocked period";
+    return -1;
+  }
+  return 0;
 }
 
 int32_t AudioDeviceLinuxALSA::ErrorRecovery(int32_t error,
-                                            snd_pcm_t* deviceHandle)
-{
-    int st = LATE(snd_pcm_state)(deviceHandle);
-    LOG(LS_VERBOSE) << "Trying to recover from "
-         << ((LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE)
-         ? "capture" : "playout") << " error: " << LATE(snd_strerror)(error)
-         << " (" << error << ") (state " << st << ")";
+                                            snd_pcm_t* deviceHandle) {
+  int st = LATE(snd_pcm_state)(deviceHandle);
+  LOG(LS_VERBOSE) << "Trying to recover from "
+                  << ((LATE(snd_pcm_stream)(deviceHandle) ==
+                       SND_PCM_STREAM_CAPTURE)
+                          ? "capture"
+                          : "playout")
+                  << " error: " << LATE(snd_strerror)(error) << " (" << error
+                  << ") (state " << st << ")";
 
-    // It is recommended to use snd_pcm_recover for all errors. If that function
-    // cannot handle the error, the input error code will be returned, otherwise
-    // 0 is returned. From snd_pcm_recover API doc: "This functions handles
-    // -EINTR (4) (interrupted system call), -EPIPE (32) (playout overrun or
-    // capture underrun) and -ESTRPIPE (86) (stream is suspended) error codes
-    // trying to prepare given stream for next I/O."
+  // It is recommended to use snd_pcm_recover for all errors. If that function
+  // cannot handle the error, the input error code will be returned, otherwise
+  // 0 is returned. From snd_pcm_recover API doc: "This functions handles
+  // -EINTR (4) (interrupted system call), -EPIPE (32) (playout overrun or
+  // capture underrun) and -ESTRPIPE (86) (stream is suspended) error codes
+  // trying to prepare given stream for next I/O."
 
-    /** Open */
-    //    SND_PCM_STATE_OPEN = 0,
-    /** Setup installed */
-    //    SND_PCM_STATE_SETUP,
-    /** Ready to start */
-    //    SND_PCM_STATE_PREPARED,
-    /** Running */
-    //    SND_PCM_STATE_RUNNING,
-    /** Stopped: underrun (playback) or overrun (capture) detected */
-    //    SND_PCM_STATE_XRUN,= 4
-    /** Draining: running (playback) or stopped (capture) */
-    //    SND_PCM_STATE_DRAINING,
-    /** Paused */
-    //    SND_PCM_STATE_PAUSED,
-    /** Hardware is suspended */
-    //    SND_PCM_STATE_SUSPENDED,
-    //  ** Hardware is disconnected */
-    //    SND_PCM_STATE_DISCONNECTED,
-    //    SND_PCM_STATE_LAST = SND_PCM_STATE_DISCONNECTED
+  /** Open */
+  //    SND_PCM_STATE_OPEN = 0,
+  /** Setup installed */
+  //    SND_PCM_STATE_SETUP,
+  /** Ready to start */
+  //    SND_PCM_STATE_PREPARED,
+  /** Running */
+  //    SND_PCM_STATE_RUNNING,
+  /** Stopped: underrun (playback) or overrun (capture) detected */
+  //    SND_PCM_STATE_XRUN,= 4
+  /** Draining: running (playback) or stopped (capture) */
+  //    SND_PCM_STATE_DRAINING,
+  /** Paused */
+  //    SND_PCM_STATE_PAUSED,
+  /** Hardware is suspended */
+  //    SND_PCM_STATE_SUSPENDED,
+  //  ** Hardware is disconnected */
+  //    SND_PCM_STATE_DISCONNECTED,
+  //    SND_PCM_STATE_LAST = SND_PCM_STATE_DISCONNECTED
 
-    // snd_pcm_recover isn't available in older alsa, e.g. on the FC4 machine
-    // in Sthlm lab.
+  // snd_pcm_recover isn't available in older alsa, e.g. on the FC4 machine
+  // in Sthlm lab.
 
-    int res = LATE(snd_pcm_recover)(deviceHandle, error, 1);
-    if (0 == res)
-    {
-        LOG(LS_VERBOSE) << "Recovery - snd_pcm_recover OK";
+  int res = LATE(snd_pcm_recover)(deviceHandle, error, 1);
+  if (0 == res) {
+    LOG(LS_VERBOSE) << "Recovery - snd_pcm_recover OK";
 
-        if ((error == -EPIPE || error == -ESTRPIPE) && // Buf underrun/overrun.
-            _recording &&
-            LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE)
-        {
-            // For capture streams we also have to repeat the explicit start()
-            // to get data flowing again.
-            int err = LATE(snd_pcm_start)(deviceHandle);
-            if (err != 0)
-            {
-                LOG(LS_ERROR) << "Recovery - snd_pcm_start error: " << err;
-                return -1;
-            }
-        }
-
-        if ((error == -EPIPE || error == -ESTRPIPE) &&  // Buf underrun/overrun.
-            _playing &&
-            LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_PLAYBACK)
-        {
-            // For capture streams we also have to repeat the explicit start() to get
-            // data flowing again.
-            int err = LATE(snd_pcm_start)(deviceHandle);
-            if (err != 0)
-            {
-              LOG(LS_ERROR) << "Recovery - snd_pcm_start error: "
-                            << LATE(snd_strerror)(err);
-              return -1;
-            }
-        }
-
-        return -EPIPE == error ? 1 : 0;
-    }
-    else {
-        LOG(LS_ERROR) << "Unrecoverable alsa stream error: " << res;
+    if ((error == -EPIPE || error == -ESTRPIPE) &&  // Buf underrun/overrun.
+        _recording &&
+        LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE) {
+      // For capture streams we also have to repeat the explicit start()
+      // to get data flowing again.
+      int err = LATE(snd_pcm_start)(deviceHandle);
+      if (err != 0) {
+        LOG(LS_ERROR) << "Recovery - snd_pcm_start error: " << err;
+        return -1;
+      }
     }
 
-    return res;
+    if ((error == -EPIPE || error == -ESTRPIPE) &&  // Buf underrun/overrun.
+        _playing &&
+        LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_PLAYBACK) {
+      // For capture streams we also have to repeat the explicit start() to get
+      // data flowing again.
+      int err = LATE(snd_pcm_start)(deviceHandle);
+      if (err != 0) {
+        LOG(LS_ERROR) << "Recovery - snd_pcm_start error: "
+                      << LATE(snd_strerror)(err);
+        return -1;
+      }
+    }
+
+    return -EPIPE == error ? 1 : 0;
+  } else {
+    LOG(LS_ERROR) << "Unrecoverable alsa stream error: " << res;
+  }
+
+  return res;
 }
 
 // ============================================================================
 //                                  Thread Methods
 // ============================================================================
 
-bool AudioDeviceLinuxALSA::PlayThreadFunc(void* pThis)
-{
-    return (static_cast<AudioDeviceLinuxALSA*>(pThis)->PlayThreadProcess());
+bool AudioDeviceLinuxALSA::PlayThreadFunc(void* pThis) {
+  return (static_cast<AudioDeviceLinuxALSA*>(pThis)->PlayThreadProcess());
 }
 
-bool AudioDeviceLinuxALSA::RecThreadFunc(void* pThis)
-{
-    return (static_cast<AudioDeviceLinuxALSA*>(pThis)->RecThreadProcess());
+bool AudioDeviceLinuxALSA::RecThreadFunc(void* pThis) {
+  return (static_cast<AudioDeviceLinuxALSA*>(pThis)->RecThreadProcess());
 }
 
-bool AudioDeviceLinuxALSA::PlayThreadProcess()
-{
-    if(!_playing)
-        return false;
+bool AudioDeviceLinuxALSA::PlayThreadProcess() {
+  if (!_playing)
+    return false;
 
-    int err;
-    snd_pcm_sframes_t frames;
-    snd_pcm_sframes_t avail_frames;
+  int err;
+  snd_pcm_sframes_t frames;
+  snd_pcm_sframes_t avail_frames;
 
-    Lock();
-    //return a positive number of frames ready otherwise a negative error code
-    avail_frames = LATE(snd_pcm_avail_update)(_handlePlayout);
-    if (avail_frames < 0)
-    {
-        LOG(LS_ERROR) << "playout snd_pcm_avail_update error: "
-                      << LATE(snd_strerror)(avail_frames);
-        ErrorRecovery(avail_frames, _handlePlayout);
-        UnLock();
-        return true;
-    }
-    else if (avail_frames == 0)
-    {
-        UnLock();
-
-        //maximum tixe in milliseconds to wait, a negative value means infinity
-        err = LATE(snd_pcm_wait)(_handlePlayout, 2);
-        if (err == 0)
-        { //timeout occured
-            LOG(LS_VERBOSE) << "playout snd_pcm_wait timeout";
-        }
-
-        return true;
-    }
-
-    if (_playoutFramesLeft <= 0)
-    {
-        UnLock();
-        _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
-        Lock();
-
-        _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
-        assert(_playoutFramesLeft == _playoutFramesIn10MS);
-    }
-
-    if (static_cast<uint32_t>(avail_frames) > _playoutFramesLeft)
-        avail_frames = _playoutFramesLeft;
-
-    int size = LATE(snd_pcm_frames_to_bytes)(_handlePlayout,
-        _playoutFramesLeft);
-    frames = LATE(snd_pcm_writei)(
-        _handlePlayout,
-        &_playoutBuffer[_playoutBufferSizeIn10MS - size],
-        avail_frames);
-
-    if (frames < 0)
-    {
-        LOG(LS_VERBOSE) << "playout snd_pcm_writei error: "
-                        << LATE(snd_strerror)(frames);
-        _playoutFramesLeft = 0;
-        ErrorRecovery(frames, _handlePlayout);
-        UnLock();
-        return true;
-    }
-    else {
-        assert(frames==avail_frames);
-        _playoutFramesLeft -= frames;
-    }
-
+  Lock();
+  // return a positive number of frames ready otherwise a negative error code
+  avail_frames = LATE(snd_pcm_avail_update)(_handlePlayout);
+  if (avail_frames < 0) {
+    LOG(LS_ERROR) << "playout snd_pcm_avail_update error: "
+                  << LATE(snd_strerror)(avail_frames);
+    ErrorRecovery(avail_frames, _handlePlayout);
     UnLock();
     return true;
-}
+  } else if (avail_frames == 0) {
+    UnLock();
 
-bool AudioDeviceLinuxALSA::RecThreadProcess()
-{
-    if (!_recording)
-        return false;
+    // maximum time in milliseconds to wait, a negative value means infinity
+    err = LATE(snd_pcm_wait)(_handlePlayout, 2);
+    if (err == 0) {  // timeout occurred
+      LOG(LS_VERBOSE) << "playout snd_pcm_wait timeout";
+    }
 
-    int err;
-    snd_pcm_sframes_t frames;
-    snd_pcm_sframes_t avail_frames;
-    int8_t buffer[_recordingBufferSizeIn10MS];
+    return true;
+  }
 
+  if (_playoutFramesLeft <= 0) {
+    UnLock();
+    _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
     Lock();
 
-    //return a positive number of frames ready otherwise a negative error code
-    avail_frames = LATE(snd_pcm_avail_update)(_handleRecord);
-    if (avail_frames < 0)
-    {
-        LOG(LS_ERROR) << "capture snd_pcm_avail_update error: "
-                      << LATE(snd_strerror)(avail_frames);
-        ErrorRecovery(avail_frames, _handleRecord);
-        UnLock();
-        return true;
-    }
-    else if (avail_frames == 0)
-    { // no frame is available now
-        UnLock();
+    _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
+    assert(_playoutFramesLeft == _playoutFramesIn10MS);
+  }
 
-        //maximum time in milliseconds to wait, a negative value means infinity
-        err = LATE(snd_pcm_wait)(_handleRecord,
-            ALSA_CAPTURE_WAIT_TIMEOUT);
-        if (err == 0) //timeout occured
-            LOG(LS_VERBOSE) << "capture snd_pcm_wait timeout";
+  if (static_cast<uint32_t>(avail_frames) > _playoutFramesLeft)
+    avail_frames = _playoutFramesLeft;
 
-        return true;
-    }
+  int size = LATE(snd_pcm_frames_to_bytes)(_handlePlayout, _playoutFramesLeft);
+  frames = LATE(snd_pcm_writei)(
+      _handlePlayout, &_playoutBuffer[_playoutBufferSizeIn10MS - size],
+      avail_frames);
 
-    if (static_cast<uint32_t>(avail_frames) > _recordingFramesLeft)
-        avail_frames = _recordingFramesLeft;
-
-    frames = LATE(snd_pcm_readi)(_handleRecord,
-        buffer, avail_frames); // frames to be written
-    if (frames < 0)
-    {
-        LOG(LS_ERROR) << "capture snd_pcm_readi error: "
-                      << LATE(snd_strerror)(frames);
-        ErrorRecovery(frames, _handleRecord);
-        UnLock();
-        return true;
-    }
-    else if (frames > 0)
-    {
-        assert(frames == avail_frames);
-
-        int left_size = LATE(snd_pcm_frames_to_bytes)(_handleRecord,
-            _recordingFramesLeft);
-        int size = LATE(snd_pcm_frames_to_bytes)(_handleRecord, frames);
-
-        memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size],
-               buffer, size);
-        _recordingFramesLeft -= frames;
-
-        if (!_recordingFramesLeft)
-        { // buf is full
-            _recordingFramesLeft = _recordingFramesIn10MS;
-
-            // store the recorded buffer (no action will be taken if the
-            // #recorded samples is not a full buffer)
-            _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
-                                               _recordingFramesIn10MS);
-
-            uint32_t currentMicLevel = 0;
-            uint32_t newMicLevel = 0;
-
-            if (AGC())
-            {
-                // store current mic level in the audio buffer if AGC is enabled
-                if (MicrophoneVolume(currentMicLevel) == 0)
-                {
-                    if (currentMicLevel == 0xffffffff)
-                        currentMicLevel = 100;
-                    // this call does not affect the actual microphone volume
-                    _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
-                }
-            }
-
-            // calculate delay
-            _playoutDelay = 0;
-            _recordingDelay = 0;
-            if (_handlePlayout)
-            {
-                err = LATE(snd_pcm_delay)(_handlePlayout,
-                    &_playoutDelay); // returned delay in frames
-                if (err < 0)
-                {
-                    // TODO(xians): Shall we call ErrorRecovery() here?
-                    _playoutDelay = 0;
-                    LOG(LS_ERROR) << "playout snd_pcm_delay: "
-                                  << LATE(snd_strerror)(err);
-                }
-            }
-
-            err = LATE(snd_pcm_delay)(_handleRecord,
-                &_recordingDelay); // returned delay in frames
-            if (err < 0)
-            {
-                // TODO(xians): Shall we call ErrorRecovery() here?
-                _recordingDelay = 0;
-                LOG(LS_ERROR) << "capture snd_pcm_delay: "
-                              << LATE(snd_strerror)(err);
-            }
-
-           // TODO(xians): Shall we add 10ms buffer delay to the record delay?
-            _ptrAudioBuffer->SetVQEData(
-                _playoutDelay * 1000 / _playoutFreq,
-                _recordingDelay * 1000 / _recordingFreq, 0);
-
-            _ptrAudioBuffer->SetTypingStatus(KeyPressed());
-
-            // Deliver recorded samples at specified sample rate, mic level etc.
-            // to the observer using callback.
-            UnLock();
-            _ptrAudioBuffer->DeliverRecordedData();
-            Lock();
-
-            if (AGC())
-            {
-                newMicLevel = _ptrAudioBuffer->NewMicLevel();
-                if (newMicLevel != 0)
-                {
-                    // The VQE will only deliver non-zero microphone levels when a
-                    // change is needed. Set this new mic level (received from the
-                    // observer as return value in the callback).
-                    if (SetMicrophoneVolume(newMicLevel) == -1)
-                        LOG(LS_WARNING)
-                            << "the required modification of the microphone volume failed";
-                }
-            }
-        }
-    }
-
+  if (frames < 0) {
+    LOG(LS_VERBOSE) << "playout snd_pcm_writei error: "
+                    << LATE(snd_strerror)(frames);
+    _playoutFramesLeft = 0;
+    ErrorRecovery(frames, _handlePlayout);
     UnLock();
     return true;
+  } else {
+    assert(frames == avail_frames);
+    _playoutFramesLeft -= frames;
+  }
+
+  UnLock();
+  return true;
 }
 
+bool AudioDeviceLinuxALSA::RecThreadProcess() {
+  if (!_recording)
+    return false;
 
-bool AudioDeviceLinuxALSA::KeyPressed() const{
+  int err;
+  snd_pcm_sframes_t frames;
+  snd_pcm_sframes_t avail_frames;
+  int8_t buffer[_recordingBufferSizeIn10MS];
+
+  Lock();
+
+  // return a positive number of frames ready otherwise a negative error code
+  avail_frames = LATE(snd_pcm_avail_update)(_handleRecord);
+  if (avail_frames < 0) {
+    LOG(LS_ERROR) << "capture snd_pcm_avail_update error: "
+                  << LATE(snd_strerror)(avail_frames);
+    ErrorRecovery(avail_frames, _handleRecord);
+    UnLock();
+    return true;
+  } else if (avail_frames == 0) {  // no frame is available now
+    UnLock();
+
+    // maximum time in milliseconds to wait, a negative value means infinity
+    err = LATE(snd_pcm_wait)(_handleRecord, ALSA_CAPTURE_WAIT_TIMEOUT);
+    if (err == 0)  // timeout occurred
+      LOG(LS_VERBOSE) << "capture snd_pcm_wait timeout";
+
+    return true;
+  }
+
+  if (static_cast<uint32_t>(avail_frames) > _recordingFramesLeft)
+    avail_frames = _recordingFramesLeft;
+
+  frames = LATE(snd_pcm_readi)(_handleRecord, buffer,
+                               avail_frames);  // frames to be written
+  if (frames < 0) {
+    LOG(LS_ERROR) << "capture snd_pcm_readi error: "
+                  << LATE(snd_strerror)(frames);
+    ErrorRecovery(frames, _handleRecord);
+    UnLock();
+    return true;
+  } else if (frames > 0) {
+    assert(frames == avail_frames);
+
+    int left_size =
+        LATE(snd_pcm_frames_to_bytes)(_handleRecord, _recordingFramesLeft);
+    int size = LATE(snd_pcm_frames_to_bytes)(_handleRecord, frames);
+
+    memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size], buffer,
+           size);
+    _recordingFramesLeft -= frames;
+
+    if (!_recordingFramesLeft) {  // buf is full
+      _recordingFramesLeft = _recordingFramesIn10MS;
+
+      // store the recorded buffer (no action will be taken if the
+      // #recorded samples is not a full buffer)
+      _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
+                                         _recordingFramesIn10MS);
+
+      uint32_t currentMicLevel = 0;
+      uint32_t newMicLevel = 0;
+
+      if (AGC()) {
+        // store current mic level in the audio buffer if AGC is enabled
+        if (MicrophoneVolume(currentMicLevel) == 0) {
+          if (currentMicLevel == 0xffffffff)
+            currentMicLevel = 100;
+          // this call does not affect the actual microphone volume
+          _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
+        }
+      }
+
+      // calculate delay
+      _playoutDelay = 0;
+      _recordingDelay = 0;
+      if (_handlePlayout) {
+        err = LATE(snd_pcm_delay)(_handlePlayout,
+                                  &_playoutDelay);  // returned delay in frames
+        if (err < 0) {
+          // TODO(xians): Shall we call ErrorRecovery() here?
+          _playoutDelay = 0;
+          LOG(LS_ERROR) << "playout snd_pcm_delay: " << LATE(snd_strerror)(err);
+        }
+      }
+
+      err = LATE(snd_pcm_delay)(_handleRecord,
+                                &_recordingDelay);  // returned delay in frames
+      if (err < 0) {
+        // TODO(xians): Shall we call ErrorRecovery() here?
+        _recordingDelay = 0;
+        LOG(LS_ERROR) << "capture snd_pcm_delay: " << LATE(snd_strerror)(err);
+      }
+
+      // TODO(xians): Shall we add 10ms buffer delay to the record delay?
+      _ptrAudioBuffer->SetVQEData(_playoutDelay * 1000 / _playoutFreq,
+                                  _recordingDelay * 1000 / _recordingFreq, 0);
+
+      _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+
+      // Deliver recorded samples at specified sample rate, mic level etc.
+      // to the observer using callback.
+      UnLock();
+      _ptrAudioBuffer->DeliverRecordedData();
+      Lock();
+
+      if (AGC()) {
+        newMicLevel = _ptrAudioBuffer->NewMicLevel();
+        if (newMicLevel != 0) {
+          // The VQE will only deliver non-zero microphone levels when a
+          // change is needed. Set this new mic level (received from the
+          // observer as return value in the callback).
+          if (SetMicrophoneVolume(newMicLevel) == -1)
+            LOG(LS_WARNING)
+                << "the required modification of the microphone volume failed";
+        }
+      }
+    }
+  }
+
+  UnLock();
+  return true;
+}
+
+bool AudioDeviceLinuxALSA::KeyPressed() const {
 #if defined(USE_X11)
   char szKey[32];
   unsigned int i = 0;
diff --git a/modules/audio_device/linux/audio_device_pulse_linux.cc b/modules/audio_device/linux/audio_device_pulse_linux.cc
index f1eddd6..b9614bf 100644
--- a/modules/audio_device/linux/audio_device_pulse_linux.cc
+++ b/modules/audio_device/linux/audio_device_pulse_linux.cc
@@ -2136,8 +2136,7 @@
               NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
         _writeErrors++;
         if (_writeErrors > 10) {
-          LOG(LS_ERROR) << "Playout error: _writeErrors="
-                        << _writeErrors
+          LOG(LS_ERROR) << "Playout error: _writeErrors=" << _writeErrors
                         << ", error=" << LATE(pa_context_errno)(_paContext);
           _writeErrors = 0;
         }
@@ -2180,8 +2179,7 @@
                                 NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
         _writeErrors++;
         if (_writeErrors > 10) {
-          LOG(LS_ERROR) << "Playout error: _writeErrors="
-                        << _writeErrors
+          LOG(LS_ERROR) << "Playout error: _writeErrors=" << _writeErrors
                         << ", error=" << LATE(pa_context_errno)(_paContext);
           _writeErrors = 0;
         }
diff --git a/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc b/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc
index 02d9cf9..aabf388 100644
--- a/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc
+++ b/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc
@@ -21,433 +21,368 @@
 #define LATE(sym) \
   LATESYM_GET(webrtc::adm_linux_alsa::AlsaSymbolTable, &AlsaSymbolTable, sym)
 
-namespace webrtc
-{
+namespace webrtc {
 
-AudioMixerManagerLinuxALSA::AudioMixerManagerLinuxALSA() :
-    _outputMixerHandle(NULL),
-    _inputMixerHandle(NULL),
-    _outputMixerElement(NULL),
-    _inputMixerElement(NULL)
-{
-    LOG(LS_INFO) << __FUNCTION__ << " created";
+AudioMixerManagerLinuxALSA::AudioMixerManagerLinuxALSA()
+    : _outputMixerHandle(NULL),
+      _inputMixerHandle(NULL),
+      _outputMixerElement(NULL),
+      _inputMixerElement(NULL) {
+  LOG(LS_INFO) << __FUNCTION__ << " created";
 
-    memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
-    memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
+  memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
+  memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
 }
 
-AudioMixerManagerLinuxALSA::~AudioMixerManagerLinuxALSA()
-{
-    LOG(LS_INFO) << __FUNCTION__ << " destroyed";
-    Close();
+AudioMixerManagerLinuxALSA::~AudioMixerManagerLinuxALSA() {
+  LOG(LS_INFO) << __FUNCTION__ << " destroyed";
+  Close();
 }
 
 // ============================================================================
 //                                    PUBLIC METHODS
 // ============================================================================
 
-int32_t AudioMixerManagerLinuxALSA::Close()
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxALSA::Close() {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    CloseSpeaker();
-    CloseMicrophone();
+  CloseSpeaker();
+  CloseMicrophone();
 
-    return 0;
-
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::CloseSpeaker()
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxALSA::CloseSpeaker() {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    int errVal = 0;
+  int errVal = 0;
 
-    if (_outputMixerHandle != NULL)
-    {
-        LOG(LS_VERBOSE) << "Closing playout mixer";
-        LATE(snd_mixer_free)(_outputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error freeing playout mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error detaching playout mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        errVal = LATE(snd_mixer_close)(_outputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
-                          << errVal;
-        }
-        _outputMixerHandle = NULL;
-        _outputMixerElement = NULL;
+  if (_outputMixerHandle != NULL) {
+    LOG(LS_VERBOSE) << "Closing playout mixer";
+    LATE(snd_mixer_free)(_outputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error freeing playout mixer: "
+                    << LATE(snd_strerror)(errVal);
     }
-    memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
-
-    return 0;
-}
-
-int32_t AudioMixerManagerLinuxALSA::CloseMicrophone()
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
-
-    rtc::CritScope lock(&_critSect);
-
-    int errVal = 0;
-
-    if (_inputMixerHandle != NULL)
-    {
-        LOG(LS_VERBOSE) << "Closing record mixer";
-
-        LATE(snd_mixer_free)(_inputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error freeing record mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        LOG(LS_VERBOSE) << "Closing record mixer 2";
-
-        errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error detaching record mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        LOG(LS_VERBOSE) << "Closing record mixer 3";
-
-        errVal = LATE(snd_mixer_close)(_inputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
-                          << errVal;
-        }
-
-        LOG(LS_VERBOSE) << "Closing record mixer 4";
-        _inputMixerHandle = NULL;
-        _inputMixerElement = NULL;
+    errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error detaching playout mixer: "
+                    << LATE(snd_strerror)(errVal);
     }
-    memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
-
-    return 0;
-}
-
-int32_t AudioMixerManagerLinuxALSA::OpenSpeaker(char* deviceName)
-{
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenSpeaker(name="
-                    << deviceName << ")";
-
-    rtc::CritScope lock(&_critSect);
-
-    int errVal = 0;
-
-    // Close any existing output mixer handle
-    //
-    if (_outputMixerHandle != NULL)
-    {
-        LOG(LS_VERBOSE) << "Closing playout mixer";
-
-        LATE(snd_mixer_free)(_outputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error freeing playout mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error detaching playout mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        errVal = LATE(snd_mixer_close)(_outputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
-                          << errVal;
-        }
+    errVal = LATE(snd_mixer_close)(_outputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal=" << errVal;
     }
     _outputMixerHandle = NULL;
     _outputMixerElement = NULL;
+  }
+  memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
 
-    errVal = LATE(snd_mixer_open)(&_outputMixerHandle, 0);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_mixer_open(&_outputMixerHandle, 0) - error";
-        return -1;
-    }
-
-    char controlName[kAdmMaxDeviceNameSize] = { 0 };
-    GetControlName(controlName, deviceName);
-
-    LOG(LS_VERBOSE) << "snd_mixer_attach(_outputMixerHandle, " << controlName
-                    << ")";
-
-    errVal = LATE(snd_mixer_attach)(_outputMixerHandle, controlName);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_mixer_attach(_outputMixerHandle, " << controlName
-                      << ") error: " << LATE(snd_strerror)(errVal);
-        _outputMixerHandle = NULL;
-        return -1;
-    }
-    strcpy(_outputMixerStr, controlName);
-
-    errVal = LATE(snd_mixer_selem_register)(_outputMixerHandle, NULL, NULL);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR)
-            << "snd_mixer_selem_register(_outputMixerHandle, NULL, NULL), "
-            << "error: " << LATE(snd_strerror)(errVal);
-        _outputMixerHandle = NULL;
-        return -1;
-    }
-
-    // Load and find the proper mixer element
-    if (LoadSpeakerMixerElement() < 0)
-    {
-        return -1;
-    }
-
-    if (_outputMixerHandle != NULL)
-    {
-        LOG(LS_VERBOSE) << "the output mixer device is now open ("
-                        << _outputMixerHandle << ")";
-    }
-
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::OpenMicrophone(char *deviceName)
-{
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenMicrophone(name="
-                    << deviceName << ")";
+int32_t AudioMixerManagerLinuxALSA::CloseMicrophone() {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    int errVal = 0;
+  int errVal = 0;
 
-    // Close any existing input mixer handle
-    //
-    if (_inputMixerHandle != NULL)
-    {
-        LOG(LS_VERBOSE) << "Closing record mixer";
+  if (_inputMixerHandle != NULL) {
+    LOG(LS_VERBOSE) << "Closing record mixer";
 
-        LATE(snd_mixer_free)(_inputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error freeing record mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        LOG(LS_VERBOSE) << "Closing record mixer";
-
-        errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error detaching record mixer: "
-                          << LATE(snd_strerror)(errVal);
-        }
-        LOG(LS_VERBOSE) << "Closing record mixer";
-
-        errVal = LATE(snd_mixer_close)(_inputMixerHandle);
-        if (errVal < 0)
-        {
-            LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
-                          << errVal;
-        }
-        LOG(LS_VERBOSE) << "Closing record mixer";
+    LATE(snd_mixer_free)(_inputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error freeing record mixer: "
+                    << LATE(snd_strerror)(errVal);
     }
+    LOG(LS_VERBOSE) << "Closing record mixer 2";
+
+    errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error detaching record mixer: "
+                    << LATE(snd_strerror)(errVal);
+    }
+    LOG(LS_VERBOSE) << "Closing record mixer 3";
+
+    errVal = LATE(snd_mixer_close)(_inputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal=" << errVal;
+    }
+
+    LOG(LS_VERBOSE) << "Closing record mixer 4";
     _inputMixerHandle = NULL;
     _inputMixerElement = NULL;
+  }
+  memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
 
-    errVal = LATE(snd_mixer_open)(&_inputMixerHandle, 0);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_mixer_open(&_inputMixerHandle, 0) - error";
-        return -1;
-    }
-
-    char controlName[kAdmMaxDeviceNameSize] = { 0 };
-    GetControlName(controlName, deviceName);
-
-    LOG(LS_VERBOSE) << "snd_mixer_attach(_inputMixerHandle, " << controlName
-                    << ")";
-
-    errVal = LATE(snd_mixer_attach)(_inputMixerHandle, controlName);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_mixer_attach(_inputMixerHandle, " << controlName
-                      << ") error: " << LATE(snd_strerror)(errVal);
-
-        _inputMixerHandle = NULL;
-        return -1;
-    }
-    strcpy(_inputMixerStr, controlName);
-
-    errVal = LATE(snd_mixer_selem_register)(_inputMixerHandle, NULL, NULL);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR)
-            << "snd_mixer_selem_register(_inputMixerHandle, NULL, NULL), "
-            << "error: " << LATE(snd_strerror)(errVal);
-
-        _inputMixerHandle = NULL;
-        return -1;
-    }
-    // Load and find the proper mixer element
-    if (LoadMicMixerElement() < 0)
-    {
-        return -1;
-    }
-
-    if (_inputMixerHandle != NULL)
-    {
-        LOG(LS_VERBOSE) << "the input mixer device is now open ("
-                        << _inputMixerHandle << ")";
-    }
-
-    return 0;
+  return 0;
 }
 
-bool AudioMixerManagerLinuxALSA::SpeakerIsInitialized() const
-{
-    LOG(LS_INFO) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxALSA::OpenSpeaker(char* deviceName) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenSpeaker(name="
+                  << deviceName << ")";
 
-    return (_outputMixerHandle != NULL);
+  rtc::CritScope lock(&_critSect);
+
+  int errVal = 0;
+
+  // Close any existing output mixer handle
+  //
+  if (_outputMixerHandle != NULL) {
+    LOG(LS_VERBOSE) << "Closing playout mixer";
+
+    LATE(snd_mixer_free)(_outputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error freeing playout mixer: "
+                    << LATE(snd_strerror)(errVal);
+    }
+    errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error detaching playout mixer: "
+                    << LATE(snd_strerror)(errVal);
+    }
+    errVal = LATE(snd_mixer_close)(_outputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal=" << errVal;
+    }
+  }
+  _outputMixerHandle = NULL;
+  _outputMixerElement = NULL;
+
+  errVal = LATE(snd_mixer_open)(&_outputMixerHandle, 0);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_open(&_outputMixerHandle, 0) - error";
+    return -1;
+  }
+
+  char controlName[kAdmMaxDeviceNameSize] = {0};
+  GetControlName(controlName, deviceName);
+
+  LOG(LS_VERBOSE) << "snd_mixer_attach(_outputMixerHandle, " << controlName
+                  << ")";
+
+  errVal = LATE(snd_mixer_attach)(_outputMixerHandle, controlName);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_attach(_outputMixerHandle, " << controlName
+                  << ") error: " << LATE(snd_strerror)(errVal);
+    _outputMixerHandle = NULL;
+    return -1;
+  }
+  strcpy(_outputMixerStr, controlName);
+
+  errVal = LATE(snd_mixer_selem_register)(_outputMixerHandle, NULL, NULL);
+  if (errVal < 0) {
+    LOG(LS_ERROR)
+        << "snd_mixer_selem_register(_outputMixerHandle, NULL, NULL), "
+        << "error: " << LATE(snd_strerror)(errVal);
+    _outputMixerHandle = NULL;
+    return -1;
+  }
+
+  // Load and find the proper mixer element
+  if (LoadSpeakerMixerElement() < 0) {
+    return -1;
+  }
+
+  if (_outputMixerHandle != NULL) {
+    LOG(LS_VERBOSE) << "the output mixer device is now open ("
+                    << _outputMixerHandle << ")";
+  }
+
+  return 0;
 }
 
-bool AudioMixerManagerLinuxALSA::MicrophoneIsInitialized() const
-{
-    LOG(LS_INFO) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxALSA::OpenMicrophone(char* deviceName) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenMicrophone(name="
+                  << deviceName << ")";
 
-    return (_inputMixerHandle != NULL);
+  rtc::CritScope lock(&_critSect);
+
+  int errVal = 0;
+
+  // Close any existing input mixer handle
+  //
+  if (_inputMixerHandle != NULL) {
+    LOG(LS_VERBOSE) << "Closing record mixer";
+
+    LATE(snd_mixer_free)(_inputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error freeing record mixer: "
+                    << LATE(snd_strerror)(errVal);
+    }
+    LOG(LS_VERBOSE) << "Closing record mixer";
+
+    errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error detaching record mixer: "
+                    << LATE(snd_strerror)(errVal);
+    }
+    LOG(LS_VERBOSE) << "Closing record mixer";
+
+    errVal = LATE(snd_mixer_close)(_inputMixerHandle);
+    if (errVal < 0) {
+      LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal=" << errVal;
+    }
+    LOG(LS_VERBOSE) << "Closing record mixer";
+  }
+  _inputMixerHandle = NULL;
+  _inputMixerElement = NULL;
+
+  errVal = LATE(snd_mixer_open)(&_inputMixerHandle, 0);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_open(&_inputMixerHandle, 0) - error";
+    return -1;
+  }
+
+  char controlName[kAdmMaxDeviceNameSize] = {0};
+  GetControlName(controlName, deviceName);
+
+  LOG(LS_VERBOSE) << "snd_mixer_attach(_inputMixerHandle, " << controlName
+                  << ")";
+
+  errVal = LATE(snd_mixer_attach)(_inputMixerHandle, controlName);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_attach(_inputMixerHandle, " << controlName
+                  << ") error: " << LATE(snd_strerror)(errVal);
+
+    _inputMixerHandle = NULL;
+    return -1;
+  }
+  strcpy(_inputMixerStr, controlName);
+
+  errVal = LATE(snd_mixer_selem_register)(_inputMixerHandle, NULL, NULL);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_selem_register(_inputMixerHandle, NULL, NULL), "
+                  << "error: " << LATE(snd_strerror)(errVal);
+
+    _inputMixerHandle = NULL;
+    return -1;
+  }
+  // Load and find the proper mixer element
+  if (LoadMicMixerElement() < 0) {
+    return -1;
+  }
+
+  if (_inputMixerHandle != NULL) {
+    LOG(LS_VERBOSE) << "the input mixer device is now open ("
+                    << _inputMixerHandle << ")";
+  }
+
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::SetSpeakerVolume(
-    uint32_t volume)
-{
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerVolume(volume="
-                    << volume << ")";
+bool AudioMixerManagerLinuxALSA::SpeakerIsInitialized() const {
+  LOG(LS_INFO) << __FUNCTION__;
 
-    rtc::CritScope lock(&_critSect);
-
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer element exists";
-        return -1;
-    }
-
-    int errVal =
-        LATE(snd_mixer_selem_set_playback_volume_all)(_outputMixerElement,
-                                                      volume);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error changing master volume: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
-
-    return (0);
+  return (_outputMixerHandle != NULL);
 }
 
-int32_t AudioMixerManagerLinuxALSA::SpeakerVolume(
-    uint32_t& volume) const
-{
+bool AudioMixerManagerLinuxALSA::MicrophoneIsInitialized() const {
+  LOG(LS_INFO) << __FUNCTION__;
 
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer element exists";
-        return -1;
-    }
+  return (_inputMixerHandle != NULL);
+}
 
-    long int vol(0);
+int32_t AudioMixerManagerLinuxALSA::SetSpeakerVolume(uint32_t volume) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerVolume(volume="
+                  << volume << ")";
 
-    int
-        errVal = LATE(snd_mixer_selem_get_playback_volume)(
-            _outputMixerElement,
-            (snd_mixer_selem_channel_id_t) 0,
-            &vol);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error getting outputvolume: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SpeakerVolume() => vol="
-                    << vol;
+  rtc::CritScope lock(&_critSect);
 
-    volume = static_cast<uint32_t> (vol);
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer element exists";
+    return -1;
+  }
 
-    return 0;
+  int errVal = LATE(snd_mixer_selem_set_playback_volume_all)(
+      _outputMixerElement, volume);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error changing master volume: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+
+  return (0);
+}
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerVolume(uint32_t& volume) const {
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer element exists";
+    return -1;
+  }
+
+  long int vol(0);
+
+  int errVal = LATE(snd_mixer_selem_get_playback_volume)(
+      _outputMixerElement, (snd_mixer_selem_channel_id_t)0, &vol);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error getting outputvolume: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SpeakerVolume() => vol="
+                  << vol;
+
+  volume = static_cast<uint32_t>(vol);
+
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxALSA::MaxSpeakerVolume(
-    uint32_t& maxVolume) const
-{
+    uint32_t& maxVolume) const {
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avilable output mixer element exists";
+    return -1;
+  }
 
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avilable output mixer element exists";
-        return -1;
-    }
+  long int minVol(0);
+  long int maxVol(0);
 
-    long int minVol(0);
-    long int maxVol(0);
+  int errVal = LATE(snd_mixer_selem_get_playback_volume_range)(
+      _outputMixerElement, &minVol, &maxVol);
 
-    int errVal =
-        LATE(snd_mixer_selem_get_playback_volume_range)(_outputMixerElement,
-                                                        &minVol, &maxVol);
+  LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+                  << ", max: " << maxVol;
 
-    LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
-                    << ", max: " << maxVol;
+  if (maxVol <= minVol) {
+    LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
+                  << LATE(snd_strerror)(errVal);
+  }
 
-    if (maxVol <= minVol)
-    {
-        LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
-                      << LATE(snd_strerror)(errVal);
-    }
+  maxVolume = static_cast<uint32_t>(maxVol);
 
-    maxVolume = static_cast<uint32_t> (maxVol);
-
-    return 0;
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxALSA::MinSpeakerVolume(
-    uint32_t& minVolume) const
-{
+    uint32_t& minVolume) const {
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer element exists";
+    return -1;
+  }
 
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer element exists";
-        return -1;
-    }
+  long int minVol(0);
+  long int maxVol(0);
 
-    long int minVol(0);
-    long int maxVol(0);
+  int errVal = LATE(snd_mixer_selem_get_playback_volume_range)(
+      _outputMixerElement, &minVol, &maxVol);
 
-    int errVal =
-        LATE(snd_mixer_selem_get_playback_volume_range)(_outputMixerElement,
-                                                        &minVol, &maxVol);
+  LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+                  << ", max: " << maxVol;
 
-    LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
-                    << ", max: " << maxVol;
+  if (maxVol <= minVol) {
+    LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
+                  << LATE(snd_strerror)(errVal);
+  }
 
-    if (maxVol <= minVol)
-    {
-        LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
-                      << LATE(snd_strerror)(errVal);
-    }
+  minVolume = static_cast<uint32_t>(minVol);
 
-    minVolume = static_cast<uint32_t> (minVol);
-
-    return 0;
+  return 0;
 }
 
 // TL: Have done testnig with these but they don't seem reliable and
@@ -534,239 +469,195 @@
  }
  */
 
-int32_t AudioMixerManagerLinuxALSA::SpeakerVolumeIsAvailable(
-    bool& available)
-{
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer element exists";
-        return -1;
-    }
+int32_t AudioMixerManagerLinuxALSA::SpeakerVolumeIsAvailable(bool& available) {
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer element exists";
+    return -1;
+  }
 
-    available = LATE(snd_mixer_selem_has_playback_volume)(_outputMixerElement);
+  available = LATE(snd_mixer_selem_has_playback_volume)(_outputMixerElement);
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::SpeakerMuteIsAvailable(
-    bool& available)
-{
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer element exists";
-        return -1;
-    }
+int32_t AudioMixerManagerLinuxALSA::SpeakerMuteIsAvailable(bool& available) {
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer element exists";
+    return -1;
+  }
 
-    available = LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
+  available = LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::SetSpeakerMute(bool enable)
-{
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerMute(enable="
-                    << enable << ")";
+int32_t AudioMixerManagerLinuxALSA::SetSpeakerMute(bool enable) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerMute(enable="
+                  << enable << ")";
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer element exists";
-        return -1;
-    }
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer element exists";
+    return -1;
+  }
 
-    // Ensure that the selected speaker destination has a valid mute control.
-    bool available(false);
-    SpeakerMuteIsAvailable(available);
-    if (!available)
-    {
-        LOG(LS_WARNING) << "it is not possible to mute the speaker";
-        return -1;
-    }
+  // Ensure that the selected speaker destination has a valid mute control.
+  bool available(false);
+  SpeakerMuteIsAvailable(available);
+  if (!available) {
+    LOG(LS_WARNING) << "it is not possible to mute the speaker";
+    return -1;
+  }
 
-    // Note value = 0 (off) means muted
-    int errVal =
-        LATE(snd_mixer_selem_set_playback_switch_all)(_outputMixerElement,
-                                                      !enable);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error setting playback switch: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  // Note value = 0 (off) means muted
+  int errVal = LATE(snd_mixer_selem_set_playback_switch_all)(
+      _outputMixerElement, !enable);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error setting playback switch: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
 
-    return (0);
+  return (0);
 }
 
-int32_t AudioMixerManagerLinuxALSA::SpeakerMute(bool& enabled) const
-{
+int32_t AudioMixerManagerLinuxALSA::SpeakerMute(bool& enabled) const {
+  if (_outputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable output mixer exists";
+    return -1;
+  }
 
-    if (_outputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable output mixer exists";
-        return -1;
-    }
+  // Ensure that the selected speaker destination has a valid mute control.
+  bool available =
+      LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
+  if (!available) {
+    LOG(LS_WARNING) << "it is not possible to mute the speaker";
+    return -1;
+  }
 
-    // Ensure that the selected speaker destination has a valid mute control.
-    bool available =
-        LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
-    if (!available)
-    {
-        LOG(LS_WARNING) << "it is not possible to mute the speaker";
-        return -1;
-    }
+  int value(false);
 
-    int value(false);
+  // Retrieve one boolean control value for a specified mute-control
+  //
+  int errVal = LATE(snd_mixer_selem_get_playback_switch)(
+      _outputMixerElement, (snd_mixer_selem_channel_id_t)0, &value);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error getting playback switch: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
 
-    // Retrieve one boolean control value for a specified mute-control
-    //
-    int
-        errVal = LATE(snd_mixer_selem_get_playback_switch)(
-            _outputMixerElement,
-            (snd_mixer_selem_channel_id_t) 0,
-            &value);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error getting playback switch: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  // Note value = 0 (off) means muted
+  enabled = (bool)!value;
 
-    // Note value = 0 (off) means muted
-    enabled = (bool) !value;
-
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::MicrophoneMuteIsAvailable(
-    bool& available)
-{
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+int32_t AudioMixerManagerLinuxALSA::MicrophoneMuteIsAvailable(bool& available) {
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
-    return 0;
+  available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::SetMicrophoneMute(bool enable)
-{
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetMicrophoneMute(enable="
-                    << enable << ")";
+int32_t AudioMixerManagerLinuxALSA::SetMicrophoneMute(bool enable) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetMicrophoneMute(enable="
+                  << enable << ")";
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    // Ensure that the selected microphone destination has a valid mute control.
-    bool available(false);
-    MicrophoneMuteIsAvailable(available);
-    if (!available)
-    {
-        LOG(LS_WARNING) << "it is not possible to mute the microphone";
-        return -1;
-    }
+  // Ensure that the selected microphone destination has a valid mute control.
+  bool available(false);
+  MicrophoneMuteIsAvailable(available);
+  if (!available) {
+    LOG(LS_WARNING) << "it is not possible to mute the microphone";
+    return -1;
+  }
 
-    // Note value = 0 (off) means muted
-    int errVal =
-        LATE(snd_mixer_selem_set_capture_switch_all)(_inputMixerElement,
-                                                     !enable);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error setting capture switch: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  // Note value = 0 (off) means muted
+  int errVal =
+      LATE(snd_mixer_selem_set_capture_switch_all)(_inputMixerElement, !enable);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error setting capture switch: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
 
-    return (0);
+  return (0);
 }
 
-int32_t AudioMixerManagerLinuxALSA::MicrophoneMute(bool& enabled) const
-{
+int32_t AudioMixerManagerLinuxALSA::MicrophoneMute(bool& enabled) const {
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer exists";
+    return -1;
+  }
 
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer exists";
-        return -1;
-    }
+  // Ensure that the selected microphone destination has a valid mute control.
+  bool available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
+  if (!available) {
+    LOG(LS_WARNING) << "it is not possible to mute the microphone";
+    return -1;
+  }
 
-    // Ensure that the selected microphone destination has a valid mute control.
-    bool available =
-        LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
-    if (!available)
-    {
-        LOG(LS_WARNING) << "it is not possible to mute the microphone";
-        return -1;
-    }
+  int value(false);
 
-    int value(false);
+  // Retrieve one boolean control value for a specified mute-control
+  //
+  int errVal = LATE(snd_mixer_selem_get_capture_switch)(
+      _inputMixerElement, (snd_mixer_selem_channel_id_t)0, &value);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error getting capture switch: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
 
-    // Retrieve one boolean control value for a specified mute-control
-    //
-    int
-        errVal = LATE(snd_mixer_selem_get_capture_switch)(
-            _inputMixerElement,
-            (snd_mixer_selem_channel_id_t) 0,
-            &value);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error getting capture switch: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  // Note value = 0 (off) means muted
+  enabled = (bool)!value;
 
-    // Note value = 0 (off) means muted
-    enabled = (bool) !value;
-
-    return 0;
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxALSA::MicrophoneVolumeIsAvailable(
-    bool& available)
-{
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+    bool& available) {
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    available = LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement);
+  available = LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement);
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::SetMicrophoneVolume(
-    uint32_t volume)
-{
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetMicrophoneVolume(volume="
-                    << volume << ")";
+int32_t AudioMixerManagerLinuxALSA::SetMicrophoneVolume(uint32_t volume) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetMicrophoneVolume(volume="
+                  << volume << ")";
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    int
-        errVal =
-            LATE(snd_mixer_selem_set_capture_volume_all)(_inputMixerElement,
-                                                         volume);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error changing microphone volume: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
+  int errVal =
+      LATE(snd_mixer_selem_set_capture_volume_all)(_inputMixerElement, volume);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error changing microphone volume: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
 
-    return (0);
+  return (0);
 }
 
 // TL: Have done testnig with these but they don't seem reliable and
@@ -799,8 +690,8 @@
 
  maxVol = (long int)maxVolume;
  printf("min %d max %d", minVol, maxVol);
- errVal = snd_mixer_selem_set_capture_volume_range(_inputMixerElement, minVol, maxVol);
- LOG(LS_VERBOSE) << "Capture hardware volume range, min: " << minVol
+ errVal = snd_mixer_selem_set_capture_volume_range(_inputMixerElement, minVol,
+ maxVol); LOG(LS_VERBOSE) << "Capture hardware volume range, min: " << minVol
                  << ", max: " << maxVol;
  if (errVal != 0)
  {
@@ -855,263 +746,220 @@
  }
  */
 
-int32_t AudioMixerManagerLinuxALSA::MicrophoneVolume(
-    uint32_t& volume) const
-{
+int32_t AudioMixerManagerLinuxALSA::MicrophoneVolume(uint32_t& volume) const {
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+  long int vol(0);
 
-    long int vol(0);
+  int errVal = LATE(snd_mixer_selem_get_capture_volume)(
+      _inputMixerElement, (snd_mixer_selem_channel_id_t)0, &vol);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "Error getting inputvolume: "
+                  << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::MicrophoneVolume() => vol="
+                  << vol;
 
-    int
-        errVal =
-            LATE(snd_mixer_selem_get_capture_volume)(
-                _inputMixerElement,
-                (snd_mixer_selem_channel_id_t) 0,
-                &vol);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "Error getting inputvolume: "
-                      << LATE(snd_strerror)(errVal);
-        return -1;
-    }
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::MicrophoneVolume() => vol="
-                    << vol;
+  volume = static_cast<uint32_t>(vol);
 
-    volume = static_cast<uint32_t> (vol);
-
-    return 0;
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxALSA::MaxMicrophoneVolume(
-    uint32_t& maxVolume) const
-{
+    uint32_t& maxVolume) const {
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+  long int minVol(0);
+  long int maxVol(0);
 
-    long int minVol(0);
-    long int maxVol(0);
+  // check if we have mic volume at all
+  if (!LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement)) {
+    LOG(LS_ERROR) << "No microphone volume available";
+    return -1;
+  }
 
-    // check if we have mic volume at all
-    if (!LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement))
-    {
-        LOG(LS_ERROR) << "No microphone volume available";
-        return -1;
-    }
+  int errVal = LATE(snd_mixer_selem_get_capture_volume_range)(
+      _inputMixerElement, &minVol, &maxVol);
 
-    int errVal =
-        LATE(snd_mixer_selem_get_capture_volume_range)(_inputMixerElement,
-                                                       &minVol, &maxVol);
+  LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
+                  << ", max: " << maxVol;
+  if (maxVol <= minVol) {
+    LOG(LS_ERROR) << "Error getting microphone volume range: "
+                  << LATE(snd_strerror)(errVal);
+  }
 
-    LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
-                    << ", max: " << maxVol;
-    if (maxVol <= minVol)
-    {
-        LOG(LS_ERROR) << "Error getting microphone volume range: "
-                      << LATE(snd_strerror)(errVal);
-    }
+  maxVolume = static_cast<uint32_t>(maxVol);
 
-    maxVolume = static_cast<uint32_t> (maxVol);
-
-    return 0;
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxALSA::MinMicrophoneVolume(
-    uint32_t& minVolume) const
-{
+    uint32_t& minVolume) const {
+  if (_inputMixerElement == NULL) {
+    LOG(LS_WARNING) << "no avaliable input mixer element exists";
+    return -1;
+  }
 
-    if (_inputMixerElement == NULL)
-    {
-        LOG(LS_WARNING) << "no avaliable input mixer element exists";
-        return -1;
-    }
+  long int minVol(0);
+  long int maxVol(0);
 
-    long int minVol(0);
-    long int maxVol(0);
+  int errVal = LATE(snd_mixer_selem_get_capture_volume_range)(
+      _inputMixerElement, &minVol, &maxVol);
 
-    int errVal =
-        LATE(snd_mixer_selem_get_capture_volume_range)(_inputMixerElement,
-                                                       &minVol, &maxVol);
+  LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
+                  << ", max: " << maxVol;
+  if (maxVol <= minVol) {
+    LOG(LS_ERROR) << "Error getting microphone volume range: "
+                  << LATE(snd_strerror)(errVal);
+  }
 
-    LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
-                    << ", max: " << maxVol;
-    if (maxVol <= minVol)
-    {
-        LOG(LS_ERROR) << "Error getting microphone volume range: "
-                      << LATE(snd_strerror)(errVal);
-    }
+  minVolume = static_cast<uint32_t>(minVol);
 
-    minVolume = static_cast<uint32_t> (minVol);
-
-    return 0;
+  return 0;
 }
 
 // ============================================================================
 //                                 Private Methods
 // ============================================================================
 
-int32_t AudioMixerManagerLinuxALSA::LoadMicMixerElement() const
-{
-    int errVal = LATE(snd_mixer_load)(_inputMixerHandle);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_mixer_load(_inputMixerHandle), error: "
-                      << LATE(snd_strerror)(errVal);
-        _inputMixerHandle = NULL;
-        return -1;
+int32_t AudioMixerManagerLinuxALSA::LoadMicMixerElement() const {
+  int errVal = LATE(snd_mixer_load)(_inputMixerHandle);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_load(_inputMixerHandle), error: "
+                  << LATE(snd_strerror)(errVal);
+    _inputMixerHandle = NULL;
+    return -1;
+  }
+
+  snd_mixer_elem_t* elem = NULL;
+  snd_mixer_elem_t* micElem = NULL;
+  unsigned mixerIdx = 0;
+  const char* selemName = NULL;
+
+  // Find and store handles to the right mixer elements
+  for (elem = LATE(snd_mixer_first_elem)(_inputMixerHandle); elem;
+       elem = LATE(snd_mixer_elem_next)(elem), mixerIdx++) {
+    if (LATE(snd_mixer_selem_is_active)(elem)) {
+      selemName = LATE(snd_mixer_selem_get_name)(elem);
+      if (strcmp(selemName, "Capture") == 0)  // "Capture", "Mic"
+      {
+        _inputMixerElement = elem;
+        LOG(LS_VERBOSE) << "Capture element set";
+      } else if (strcmp(selemName, "Mic") == 0) {
+        micElem = elem;
+        LOG(LS_VERBOSE) << "Mic element found";
+      }
     }
 
-    snd_mixer_elem_t *elem = NULL;
-    snd_mixer_elem_t *micElem = NULL;
-    unsigned mixerIdx = 0;
-    const char *selemName = NULL;
-
-    // Find and store handles to the right mixer elements
-    for (elem = LATE(snd_mixer_first_elem)(_inputMixerHandle); elem; elem
-        = LATE(snd_mixer_elem_next)(elem), mixerIdx++)
-    {
-        if (LATE(snd_mixer_selem_is_active)(elem))
-        {
-            selemName = LATE(snd_mixer_selem_get_name)(elem);
-            if (strcmp(selemName, "Capture") == 0) // "Capture", "Mic"
-            {
-                _inputMixerElement = elem;
-                LOG(LS_VERBOSE) << "Capture element set";
-            } else if (strcmp(selemName, "Mic") == 0)
-            {
-                micElem = elem;
-                LOG(LS_VERBOSE) << "Mic element found";
-            }
-        }
-
-        if (_inputMixerElement)
-        {
-            // Use the first Capture element that is found
-            // The second one may not work
-            break;
-        }
+    if (_inputMixerElement) {
+      // Use the first Capture element that is found
+      // The second one may not work
+      break;
     }
+  }
 
-    if (_inputMixerElement == NULL)
-    {
-        // We didn't find a Capture handle, use Mic.
-        if (micElem != NULL)
-        {
-            _inputMixerElement = micElem;
-            LOG(LS_VERBOSE) << "Using Mic as capture volume.";
-        } else
-        {
-            _inputMixerElement = NULL;
-            LOG(LS_ERROR) << "Could not find capture volume on the mixer.";
+  if (_inputMixerElement == NULL) {
+    // We didn't find a Capture handle, use Mic.
+    if (micElem != NULL) {
+      _inputMixerElement = micElem;
+      LOG(LS_VERBOSE) << "Using Mic as capture volume.";
+    } else {
+      _inputMixerElement = NULL;
+      LOG(LS_ERROR) << "Could not find capture volume on the mixer.";
 
-            return -1;
-        }
+      return -1;
     }
+  }
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxALSA::LoadSpeakerMixerElement() const
-{
-    int errVal = LATE(snd_mixer_load)(_outputMixerHandle);
-    if (errVal < 0)
-    {
-        LOG(LS_ERROR) << "snd_mixer_load(_outputMixerHandle), error: "
-                      << LATE(snd_strerror)(errVal);
-        _outputMixerHandle = NULL;
-        return -1;
+int32_t AudioMixerManagerLinuxALSA::LoadSpeakerMixerElement() const {
+  int errVal = LATE(snd_mixer_load)(_outputMixerHandle);
+  if (errVal < 0) {
+    LOG(LS_ERROR) << "snd_mixer_load(_outputMixerHandle), error: "
+                  << LATE(snd_strerror)(errVal);
+    _outputMixerHandle = NULL;
+    return -1;
+  }
+
+  snd_mixer_elem_t* elem = NULL;
+  snd_mixer_elem_t* masterElem = NULL;
+  snd_mixer_elem_t* speakerElem = NULL;
+  unsigned mixerIdx = 0;
+  const char* selemName = NULL;
+
+  // Find and store handles to the right mixer elements
+  for (elem = LATE(snd_mixer_first_elem)(_outputMixerHandle); elem;
+       elem = LATE(snd_mixer_elem_next)(elem), mixerIdx++) {
+    if (LATE(snd_mixer_selem_is_active)(elem)) {
+      selemName = LATE(snd_mixer_selem_get_name)(elem);
+      LOG(LS_VERBOSE) << "snd_mixer_selem_get_name " << mixerIdx << ": "
+                      << selemName << " =" << elem;
+
+      // "Master", "PCM", "Wave", "Master Mono", "PC Speaker", "PCM", "Wave"
+      if (strcmp(selemName, "PCM") == 0) {
+        _outputMixerElement = elem;
+        LOG(LS_VERBOSE) << "PCM element set";
+      } else if (strcmp(selemName, "Master") == 0) {
+        masterElem = elem;
+        LOG(LS_VERBOSE) << "Master element found";
+      } else if (strcmp(selemName, "Speaker") == 0) {
+        speakerElem = elem;
+        LOG(LS_VERBOSE) << "Speaker element found";
+      }
     }
 
-    snd_mixer_elem_t *elem = NULL;
-    snd_mixer_elem_t *masterElem = NULL;
-    snd_mixer_elem_t *speakerElem = NULL;
-    unsigned mixerIdx = 0;
-    const char *selemName = NULL;
-
-    // Find and store handles to the right mixer elements
-    for (elem = LATE(snd_mixer_first_elem)(_outputMixerHandle); elem; elem
-        = LATE(snd_mixer_elem_next)(elem), mixerIdx++)
-    {
-        if (LATE(snd_mixer_selem_is_active)(elem))
-        {
-            selemName = LATE(snd_mixer_selem_get_name)(elem);
-            LOG(LS_VERBOSE) << "snd_mixer_selem_get_name " << mixerIdx << ": "
-                            << selemName << " =" << elem;
-
-            // "Master", "PCM", "Wave", "Master Mono", "PC Speaker", "PCM", "Wave"
-            if (strcmp(selemName, "PCM") == 0)
-            {
-                _outputMixerElement = elem;
-                LOG(LS_VERBOSE) << "PCM element set";
-            } else if (strcmp(selemName, "Master") == 0)
-            {
-                masterElem = elem;
-                LOG(LS_VERBOSE) << "Master element found";
-            } else if (strcmp(selemName, "Speaker") == 0)
-            {
-                speakerElem = elem;
-                LOG(LS_VERBOSE) << "Speaker element found";
-            }
-        }
-
-        if (_outputMixerElement)
-        {
-            // We have found the element we want
-            break;
-        }
+    if (_outputMixerElement) {
+      // We have found the element we want
+      break;
     }
+  }
 
-    // If we didn't find a PCM Handle, use Master or Speaker
-    if (_outputMixerElement == NULL)
-    {
-        if (masterElem != NULL)
-        {
-            _outputMixerElement = masterElem;
-            LOG(LS_VERBOSE) << "Using Master as output volume.";
-        } else if (speakerElem != NULL)
-        {
-            _outputMixerElement = speakerElem;
-            LOG(LS_VERBOSE) << "Using Speaker as output volume.";
-        } else
-        {
-            _outputMixerElement = NULL;
-            LOG(LS_ERROR) << "Could not find output volume in the mixer.";
-            return -1;
-        }
+  // If we didn't find a PCM Handle, use Master or Speaker
+  if (_outputMixerElement == NULL) {
+    if (masterElem != NULL) {
+      _outputMixerElement = masterElem;
+      LOG(LS_VERBOSE) << "Using Master as output volume.";
+    } else if (speakerElem != NULL) {
+      _outputMixerElement = speakerElem;
+      LOG(LS_VERBOSE) << "Using Speaker as output volume.";
+    } else {
+      _outputMixerElement = NULL;
+      LOG(LS_ERROR) << "Could not find output volume in the mixer.";
+      return -1;
     }
+  }
 
-    return 0;
+  return 0;
 }
 
 void AudioMixerManagerLinuxALSA::GetControlName(char* controlName,
-                                                char* deviceName) const
-{
-    // Example
-    // deviceName: "front:CARD=Intel,DEV=0"
-    // controlName: "hw:CARD=Intel"
-    char* pos1 = strchr(deviceName, ':');
-    char* pos2 = strchr(deviceName, ',');
-    if (!pos2) {
-        // Can also be default:CARD=Intel
-        pos2 = &deviceName[strlen(deviceName)];
-    }
-    if (pos1 && pos2) {
-        strcpy(controlName, "hw");
-        int nChar = (int) (pos2 - pos1);
-        strncpy(&controlName[2], pos1, nChar);
-        controlName[2 + nChar] = '\0';
-    } else {
-        strcpy(controlName, deviceName);
-    }
-
+                                                char* deviceName) const {
+  // Example
+  // deviceName: "front:CARD=Intel,DEV=0"
+  // controlName: "hw:CARD=Intel"
+  char* pos1 = strchr(deviceName, ':');
+  char* pos2 = strchr(deviceName, ',');
+  if (!pos2) {
+    // Can also be default:CARD=Intel
+    pos2 = &deviceName[strlen(deviceName)];
+  }
+  if (pos1 && pos2) {
+    strcpy(controlName, "hw");
+    int nChar = (int)(pos2 - pos1);
+    strncpy(&controlName[2], pos1, nChar);
+    controlName[2 + nChar] = '\0';
+  } else {
+    strcpy(controlName, deviceName);
+  }
 }
 
-}
+}  // namespace webrtc
diff --git a/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc b/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
index 21f7fd0..80896c9 100644
--- a/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
+++ b/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
@@ -23,8 +23,7 @@
   LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, \
               sym)
 
-namespace webrtc
-{
+namespace webrtc {
 
 class AutoPulseLock {
  public:
@@ -33,38 +32,34 @@
     LATE(pa_threaded_mainloop_lock)(pa_mainloop_);
   }
 
-  ~AutoPulseLock() {
-    LATE(pa_threaded_mainloop_unlock)(pa_mainloop_);
-  }
+  ~AutoPulseLock() { LATE(pa_threaded_mainloop_unlock)(pa_mainloop_); }
 
  private:
   pa_threaded_mainloop* const pa_mainloop_;
 };
 
-AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse() :
-    _paOutputDeviceIndex(-1),
-    _paInputDeviceIndex(-1),
-    _paPlayStream(NULL),
-    _paRecStream(NULL),
-    _paMainloop(NULL),
-    _paContext(NULL),
-    _paVolume(0),
-    _paMute(0),
-    _paVolSteps(0),
-    _paSpeakerMute(false),
-    _paSpeakerVolume(PA_VOLUME_NORM),
-    _paChannels(0),
-    _paObjectsSet(false)
-{
-    LOG(LS_INFO) << __FUNCTION__ << " created";
+AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse()
+    : _paOutputDeviceIndex(-1),
+      _paInputDeviceIndex(-1),
+      _paPlayStream(NULL),
+      _paRecStream(NULL),
+      _paMainloop(NULL),
+      _paContext(NULL),
+      _paVolume(0),
+      _paMute(0),
+      _paVolSteps(0),
+      _paSpeakerMute(false),
+      _paSpeakerVolume(PA_VOLUME_NORM),
+      _paChannels(0),
+      _paObjectsSet(false) {
+  LOG(LS_INFO) << __FUNCTION__ << " created";
 }
 
-AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse()
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_INFO) << __FUNCTION__ << " destroyed";
+AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_INFO) << __FUNCTION__ << " destroyed";
 
-    Close();
+  Close();
 }
 
 // ===========================================================================
@@ -73,866 +68,734 @@
 
 int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects(
     pa_threaded_mainloop* mainloop,
-    pa_context* context)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << __FUNCTION__;
+    pa_context* context) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    if (!mainloop || !context)
-    {
-        LOG(LS_ERROR) << "could not set PulseAudio objects for mixer";
-        return -1;
-    }
+  if (!mainloop || !context) {
+    LOG(LS_ERROR) << "could not set PulseAudio objects for mixer";
+    return -1;
+  }
 
-    _paMainloop = mainloop;
-    _paContext = context;
-    _paObjectsSet = true;
+  _paMainloop = mainloop;
+  _paContext = context;
+  _paObjectsSet = true;
 
-    LOG(LS_VERBOSE) << "the PulseAudio objects for the mixer has been set";
+  LOG(LS_VERBOSE) << "the PulseAudio objects for the mixer has been set";
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::Close()
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxPulse::Close() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    CloseSpeaker();
-    CloseMicrophone();
+  CloseSpeaker();
+  CloseMicrophone();
 
-    _paMainloop = NULL;
-    _paContext = NULL;
-    _paObjectsSet = false;
+  _paMainloop = NULL;
+  _paContext = NULL;
+  _paObjectsSet = false;
 
-    return 0;
-
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::CloseSpeaker()
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxPulse::CloseSpeaker() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    // Reset the index to -1
-    _paOutputDeviceIndex = -1;
-    _paPlayStream = NULL;
+  // Reset the index to -1
+  _paOutputDeviceIndex = -1;
+  _paPlayStream = NULL;
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::CloseMicrophone()
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioMixerManagerLinuxPulse::CloseMicrophone() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    // Reset the index to -1
-    _paInputDeviceIndex = -1;
-    _paRecStream = NULL;
+  // Reset the index to -1
+  _paInputDeviceIndex = -1;
+  _paRecStream = NULL;
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetPlayStream(playStream)";
+int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetPlayStream(playStream)";
 
-    _paPlayStream = playStream;
-    return 0;
+  _paPlayStream = playStream;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetRecStream(recStream)";
+int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetRecStream(recStream)";
 
-    _paRecStream = recStream;
-    return 0;
+  _paRecStream = recStream;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::OpenSpeaker(
-    uint16_t deviceIndex)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex="
-                    << deviceIndex << ")";
+int32_t AudioMixerManagerLinuxPulse::OpenSpeaker(uint16_t deviceIndex) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex="
+                  << deviceIndex << ")";
 
-    // No point in opening the speaker
-    // if PA objects have not been set
-    if (!_paObjectsSet)
-    {
-        LOG(LS_ERROR) << "PulseAudio objects has not been set";
-        return -1;
-    }
+  // No point in opening the speaker
+  // if PA objects have not been set
+  if (!_paObjectsSet) {
+    LOG(LS_ERROR) << "PulseAudio objects has not been set";
+    return -1;
+  }
 
-    // Set the index for the PulseAudio
-    // output device to control
-    _paOutputDeviceIndex = deviceIndex;
+  // Set the index for the PulseAudio
+  // output device to control
+  _paOutputDeviceIndex = deviceIndex;
 
-    LOG(LS_VERBOSE) << "the output mixer device is now open";
+  LOG(LS_VERBOSE) << "the output mixer device is now open";
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(
-    uint16_t deviceIndex)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE)
-        << "AudioMixerManagerLinuxPulse::OpenMicrophone(deviceIndex="
-        << deviceIndex << ")";
+int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(uint16_t deviceIndex) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::OpenMicrophone(deviceIndex="
+                  << deviceIndex << ")";
 
-    // No point in opening the microphone
-    // if PA objects have not been set
-    if (!_paObjectsSet)
-    {
-        LOG(LS_ERROR) << "PulseAudio objects have not been set";
-        return -1;
-    }
+  // No point in opening the microphone
+  // if PA objects have not been set
+  if (!_paObjectsSet) {
+    LOG(LS_ERROR) << "PulseAudio objects have not been set";
+    return -1;
+  }
 
-    // Set the index for the PulseAudio
-    // input device to control
-    _paInputDeviceIndex = deviceIndex;
+  // Set the index for the PulseAudio
+  // input device to control
+  _paInputDeviceIndex = deviceIndex;
 
-    LOG(LS_VERBOSE) << "the input mixer device is now open";
+  LOG(LS_VERBOSE) << "the input mixer device is now open";
 
-    return 0;
+  return 0;
 }
 
-bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_INFO) << __FUNCTION__;
+bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_INFO) << __FUNCTION__;
 
-    return (_paOutputDeviceIndex != -1);
+  return (_paOutputDeviceIndex != -1);
 }
 
-bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_INFO) << __FUNCTION__;
+bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_INFO) << __FUNCTION__;
 
-    return (_paInputDeviceIndex != -1);
+  return (_paInputDeviceIndex != -1);
 }
 
-int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(
-    uint32_t volume)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume="
-                    << volume << ")";
+int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(uint32_t volume) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume="
+                  << volume << ")";
 
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  bool setFailed(false);
+
+  if (_paPlayStream &&
+      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+    // We can only really set the volume if we have a connected stream
+    AutoPulseLock auto_lock(_paMainloop);
+
+    // Get the number of channels from the sample specification
+    const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_paPlayStream);
+    if (!spec) {
+      LOG(LS_ERROR) << "could not get sample specification";
+      return -1;
     }
 
-    bool setFailed(false);
+    // Set the same volume for all channels
+    pa_cvolume cVolumes;
+    LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
 
-    if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        // We can only really set the volume if we have a connected stream
-        AutoPulseLock auto_lock(_paMainloop);
-
-        // Get the number of channels from the sample specification
-        const pa_sample_spec *spec =
-            LATE(pa_stream_get_sample_spec)(_paPlayStream);
-        if (!spec)
-        {
-            LOG(LS_ERROR) << "could not get sample specification";
-            return -1;
-        }
-
-        // Set the same volume for all channels
-        pa_cvolume cVolumes;
-        LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
-
-        pa_operation* paOperation = NULL;
-        paOperation = LATE(pa_context_set_sink_input_volume)(
-            _paContext,
-            LATE(pa_stream_get_index)(_paPlayStream),
-            &cVolumes,
-            PaSetVolumeCallback, NULL);
-        if (!paOperation)
-        {
-            setFailed = true;
-        }
-
-        // Don't need to wait for the completion
-        LATE(pa_operation_unref)(paOperation);
-    } else
-    {
-        // We have not created a stream or it's not connected to the sink
-        // Save the volume to be set at connection
-        _paSpeakerVolume = volume;
+    pa_operation* paOperation = NULL;
+    paOperation = LATE(pa_context_set_sink_input_volume)(
+        _paContext, LATE(pa_stream_get_index)(_paPlayStream), &cVolumes,
+        PaSetVolumeCallback, NULL);
+    if (!paOperation) {
+      setFailed = true;
     }
 
-    if (setFailed)
-    {
-        LOG(LS_WARNING) << "could not set speaker volume, error="
-                        << LATE(pa_context_errno)(_paContext);
+    // Don't need to wait for the completion
+    LATE(pa_operation_unref)(paOperation);
+  } else {
+    // We have not created a stream or it's not connected to the sink
+    // Save the volume to be set at connection
+    _paSpeakerVolume = volume;
+  }
 
-        return -1;
-    }
+  if (setFailed) {
+    LOG(LS_WARNING) << "could not set speaker volume, error="
+                    << LATE(pa_context_errno)(_paContext);
 
-    return 0;
+    return -1;
+  }
+
+  return 0;
 }
 
-int32_t
-AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const
-{
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
+int32_t AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const {
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
 
-    if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        // We can only get the volume if we have a connected stream
-        if (!GetSinkInputInfo())
-          return -1;
-
-        AutoPulseLock auto_lock(_paMainloop);
-        volume = static_cast<uint32_t> (_paVolume);
-    } else
-    {
-        AutoPulseLock auto_lock(_paMainloop);
-        volume = _paSpeakerVolume;
-    }
-
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SpeakerVolume() => vol="
-                    << volume;
-
-    return 0;
-}
-
-int32_t
-AudioMixerManagerLinuxPulse::MaxSpeakerVolume(uint32_t& maxVolume) const
-{
-
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    // PA_VOLUME_NORM corresponds to 100% (0db)
-    // but PA allows up to 150 db amplification
-    maxVolume = static_cast<uint32_t> (PA_VOLUME_NORM);
-
-    return 0;
-}
-
-int32_t
-AudioMixerManagerLinuxPulse::MinSpeakerVolume(uint32_t& minVolume) const
-{
-
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    minVolume = static_cast<uint32_t> (PA_VOLUME_MUTED);
-
-    return 0;
-}
-
-int32_t
-AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    // Always available in Pulse Audio
-    available = true;
-
-    return 0;
-}
-
-int32_t
-AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    // Always available in Pulse Audio
-    available = true;
-
-    return 0;
-}
-
-int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerMute(enable="
-                    << enable << ")";
-
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    bool setFailed(false);
-
-    if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        // We can only really mute if we have a connected stream
-        AutoPulseLock auto_lock(_paMainloop);
-
-        pa_operation* paOperation = NULL;
-        paOperation = LATE(pa_context_set_sink_input_mute)(
-            _paContext,
-            LATE(pa_stream_get_index)(_paPlayStream),
-            (int) enable,
-            PaSetVolumeCallback,
-            NULL);
-        if (!paOperation)
-        {
-            setFailed = true;
-        }
-
-        // Don't need to wait for the completion
-        LATE(pa_operation_unref)(paOperation);
-    } else
-    {
-        // We have not created a stream or it's not connected to the sink
-        // Save the mute status to be set at connection
-        _paSpeakerMute = enable;
-    }
-
-    if (setFailed)
-    {
-        LOG(LS_WARNING) << "could not mute speaker, error="
-                        << LATE(pa_context_errno)(_paContext);
-        return -1;
-    }
-
-    return 0;
-}
-
-int32_t AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const
-{
-
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        // We can only get the mute status if we have a connected stream
-        if (!GetSinkInputInfo())
-          return -1;
-
-        enabled = static_cast<bool> (_paMute);
-    } else
-    {
-        enabled = _paSpeakerMute;
-    }
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SpeakerMute() => enabled="
-                    << enabled;
-
-    return 0;
-}
-
-int32_t
-AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paOutputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "output device index has not been set";
-        return -1;
-    }
-
-    uint32_t deviceIndex = (uint32_t) _paOutputDeviceIndex;
-
-    {
-        AutoPulseLock auto_lock(_paMainloop);
-
-        // Get the actual stream device index if we have a connected stream
-        // The device used by the stream can be changed
-        // during the call
-        if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream)
-            != PA_STREAM_UNCONNECTED))
-        {
-            deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream);
-        }
-    }
-
-    if (!GetSinkInfoByIndex(deviceIndex))
+  if (_paPlayStream &&
+      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+    // We can only get the volume if we have a connected stream
+    if (!GetSinkInputInfo())
       return -1;
 
-    available = static_cast<bool> (_paChannels == 2);
+    AutoPulseLock auto_lock(_paMainloop);
+    volume = static_cast<uint32_t>(_paVolume);
+  } else {
+    AutoPulseLock auto_lock(_paMainloop);
+    volume = _paSpeakerVolume;
+  }
 
-    return 0;
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SpeakerVolume() => vol="
+                  << volume;
+
+  return 0;
 }
 
-int32_t
-AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
+int32_t AudioMixerManagerLinuxPulse::MaxSpeakerVolume(
+    uint32_t& maxVolume) const {
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  // PA_VOLUME_NORM corresponds to 100% (0db)
+  // but PA allows up to 150 db amplification
+  maxVolume = static_cast<uint32_t>(PA_VOLUME_NORM);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MinSpeakerVolume(
+    uint32_t& minVolume) const {
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  minVolume = static_cast<uint32_t>(PA_VOLUME_MUTED);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  // Always available in Pulse Audio
+  available = true;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  // Always available in Pulse Audio
+  available = true;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerMute(enable="
+                  << enable << ")";
+
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  bool setFailed(false);
+
+  if (_paPlayStream &&
+      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+    // We can only really mute if we have a connected stream
+    AutoPulseLock auto_lock(_paMainloop);
+
+    pa_operation* paOperation = NULL;
+    paOperation = LATE(pa_context_set_sink_input_mute)(
+        _paContext, LATE(pa_stream_get_index)(_paPlayStream), (int)enable,
+        PaSetVolumeCallback, NULL);
+    if (!paOperation) {
+      setFailed = true;
     }
 
-    uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
+    // Don't need to wait for the completion
+    LATE(pa_operation_unref)(paOperation);
+  } else {
+    // We have not created a stream or it's not connected to the sink
+    // Save the mute status to be set at connection
+    _paSpeakerMute = enable;
+  }
 
+  if (setFailed) {
+    LOG(LS_WARNING) << "could not mute speaker, error="
+                    << LATE(pa_context_errno)(_paContext);
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const {
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  if (_paPlayStream &&
+      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+    // We can only get the mute status if we have a connected stream
+    if (!GetSinkInputInfo())
+      return -1;
+
+    enabled = static_cast<bool>(_paMute);
+  } else {
+    enabled = _paSpeakerMute;
+  }
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SpeakerMute() => enabled="
+                  << enabled;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paOutputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  uint32_t deviceIndex = (uint32_t)_paOutputDeviceIndex;
+
+  {
     AutoPulseLock auto_lock(_paMainloop);
 
     // Get the actual stream device index if we have a connected stream
     // The device used by the stream can be changed
     // during the call
-    if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+    if (_paPlayStream &&
+        (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+      deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream);
     }
+  }
 
-    pa_operation* paOperation = NULL;
+  if (!GetSinkInfoByIndex(deviceIndex))
+    return -1;
 
-    // Get info for this source
-    // We want to know if the actual device can record in stereo
-    paOperation = LATE(pa_context_get_source_info_by_index)(
-        _paContext, deviceIndex,
-        PaSourceInfoCallback,
-        (void*) this);
+  available = static_cast<bool>(_paChannels == 2);
 
-    WaitForOperationCompletion(paOperation);
+  return 0;
+}
 
-    available = static_cast<bool> (_paChannels == 2);
+int32_t AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(
+    bool& available) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    LOG(LS_VERBOSE)
-        << "AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()"
-        << " => available=" << available;
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
 
-    return 0;
+  AutoPulseLock auto_lock(_paMainloop);
+
+  // Get the actual stream device index if we have a connected stream
+  // The device used by the stream can be changed
+  // during the call
+  if (_paRecStream &&
+      (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+    deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+  }
+
+  pa_operation* paOperation = NULL;
+
+  // Get info for this source
+  // We want to know if the actual device can record in stereo
+  paOperation = LATE(pa_context_get_source_info_by_index)(
+      _paContext, deviceIndex, PaSourceInfoCallback, (void*)this);
+
+  WaitForOperationCompletion(paOperation);
+
+  available = static_cast<bool>(_paChannels == 2);
+
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()"
+                  << " => available=" << available;
+
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable(
-    bool& available)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
+    bool& available) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    // Always available in Pulse Audio
-    available = true;
+  // Always available in Pulse Audio
+  available = true;
 
-    return 0;
+  return 0;
 }
 
-int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable="
-                    << enable << ")";
+int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable="
+                  << enable << ")";
 
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    bool setFailed(false);
-    pa_operation* paOperation = NULL;
+  bool setFailed(false);
+  pa_operation* paOperation = NULL;
 
-    uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
 
+  AutoPulseLock auto_lock(_paMainloop);
+
+  // Get the actual stream device index if we have a connected stream
+  // The device used by the stream can be changed
+  // during the call
+  if (_paRecStream &&
+      (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+    deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+  }
+
+  // Set mute switch for the source
+  paOperation = LATE(pa_context_set_source_mute_by_index)(
+      _paContext, deviceIndex, enable, PaSetVolumeCallback, NULL);
+
+  if (!paOperation) {
+    setFailed = true;
+  }
+
+  // Don't need to wait for this to complete.
+  LATE(pa_operation_unref)(paOperation);
+
+  if (setFailed) {
+    LOG(LS_WARNING) << "could not mute microphone, error="
+                    << LATE(pa_context_errno)(_paContext);
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+  {
     AutoPulseLock auto_lock(_paMainloop);
-
     // Get the actual stream device index if we have a connected stream
     // The device used by the stream can be changed
     // during the call
-    if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+    if (_paRecStream &&
+        (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+      deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
     }
+  }
 
-    // Set mute switch for the source
-    paOperation = LATE(pa_context_set_source_mute_by_index)(
-        _paContext, deviceIndex,
-        enable,
-        PaSetVolumeCallback, NULL);
+  if (!GetSourceInfoByIndex(deviceIndex))
+    return -1;
 
-    if (!paOperation)
-    {
-        setFailed = true;
-    }
+  enabled = static_cast<bool>(_paMute);
 
-    // Don't need to wait for this to complete.
-    LATE(pa_operation_unref)(paOperation);
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::MicrophoneMute() => enabled="
+                  << enabled;
 
-    if (setFailed)
-    {
-        LOG(LS_WARNING) << "could not mute microphone, error="
-                        << LATE(pa_context_errno)(_paContext);
-        return -1;
-    }
-
-    return 0;
-}
-
-int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
-
-    uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
-
-    {
-        AutoPulseLock auto_lock(_paMainloop);
-        // Get the actual stream device index if we have a connected stream
-        // The device used by the stream can be changed
-        // during the call
-        if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream)
-            != PA_STREAM_UNCONNECTED))
-        {
-            deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
-        }
-    }
-
-    if (!GetSourceInfoByIndex(deviceIndex))
-      return -1;
-
-    enabled = static_cast<bool> (_paMute);
-
-    LOG(LS_VERBOSE)
-        << "AudioMixerManagerLinuxPulse::MicrophoneMute() => enabled="
-        << enabled;
-
-    return 0;
+  return 0;
 }
 
 int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeIsAvailable(
-    bool& available)
-{
-    RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
+    bool& available) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    // Always available in Pulse Audio
-    available = true;
+  // Always available in Pulse Audio
+  available = true;
 
-    return 0;
+  return 0;
 }
 
-int32_t
-AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume)
-{
-    LOG(LS_VERBOSE)
-        << "AudioMixerManagerLinuxPulse::SetMicrophoneVolume(volume=" << volume
-        << ")";
+int32_t AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) {
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetMicrophoneVolume(volume="
+                  << volume << ")";
 
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    // Unlike output streams, input streams have no concept of a stream
-    // volume, only a device volume. So we have to change the volume of the
-    // device itself.
+  // Unlike output streams, input streams have no concept of a stream
+  // volume, only a device volume. So we have to change the volume of the
+  // device itself.
 
-    // The device may have a different number of channels than the stream and
-    // their mapping may be different, so we don't want to use the channel
-    // count from our sample spec. We could use PA_CHANNELS_MAX to cover our
-    // bases, and the server allows that even if the device's channel count
-    // is lower, but some buggy PA clients don't like that (the pavucontrol
-    // on Hardy dies in an assert if the channel count is different). So
-    // instead we look up the actual number of channels that the device has.
+  // The device may have a different number of channels than the stream and
+  // their mapping may be different, so we don't want to use the channel
+  // count from our sample spec. We could use PA_CHANNELS_MAX to cover our
+  // bases, and the server allows that even if the device's channel count
+  // is lower, but some buggy PA clients don't like that (the pavucontrol
+  // on Hardy dies in an assert if the channel count is different). So
+  // instead we look up the actual number of channels that the device has.
+  AutoPulseLock auto_lock(_paMainloop);
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+  // Get the actual stream device index if we have a connected stream
+  // The device used by the stream can be changed
+  // during the call
+  if (_paRecStream &&
+      (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+    deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+  }
+
+  bool setFailed(false);
+  pa_operation* paOperation = NULL;
+
+  // Get the number of channels for this source
+  paOperation = LATE(pa_context_get_source_info_by_index)(
+      _paContext, deviceIndex, PaSourceInfoCallback, (void*)this);
+
+  WaitForOperationCompletion(paOperation);
+
+  uint8_t channels = _paChannels;
+  pa_cvolume cVolumes;
+  LATE(pa_cvolume_set)(&cVolumes, channels, volume);
+
+  // Set the volume for the source
+  paOperation = LATE(pa_context_set_source_volume_by_index)(
+      _paContext, deviceIndex, &cVolumes, PaSetVolumeCallback, NULL);
+
+  if (!paOperation) {
+    setFailed = true;
+  }
+
+  // Don't need to wait for this to complete.
+  LATE(pa_operation_unref)(paOperation);
+
+  if (setFailed) {
+    LOG(LS_WARNING) << "could not set microphone volume, error="
+                    << LATE(pa_context_errno)(_paContext);
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneVolume(uint32_t& volume) const {
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+  {
     AutoPulseLock auto_lock(_paMainloop);
-    uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
-
-    // Get the actual stream device index if we have a connected stream
-    // The device used by the stream can be changed
-    // during the call
-    if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream)
-        != PA_STREAM_UNCONNECTED))
-    {
-        deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+    // Get the actual stream device index if we have a connected stream.
+    // The device used by the stream can be changed during the call.
+    if (_paRecStream &&
+        (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+      deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
     }
+  }
 
-    bool setFailed(false);
-    pa_operation* paOperation = NULL;
+  if (!GetSourceInfoByIndex(deviceIndex))
+    return -1;
 
-    // Get the number of channels for this source
-    paOperation
-        = LATE(pa_context_get_source_info_by_index)(_paContext, deviceIndex,
-                                                    PaSourceInfoCallback,
-                                                    (void*) this);
+  {
+    AutoPulseLock auto_lock(_paMainloop);
+    volume = static_cast<uint32_t>(_paVolume);
+  }
 
-    WaitForOperationCompletion(paOperation);
+  LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::MicrophoneVolume() => vol="
+                  << volume;
 
-    uint8_t channels = _paChannels;
-    pa_cvolume cVolumes;
-    LATE(pa_cvolume_set)(&cVolumes, channels, volume);
-
-    // Set the volume for the source
-    paOperation
-        = LATE(pa_context_set_source_volume_by_index)(_paContext, deviceIndex,
-                                                      &cVolumes,
-                                                      PaSetVolumeCallback,
-                                                      NULL);
-
-    if (!paOperation)
-    {
-        setFailed = true;
-    }
-
-    // Don't need to wait for this to complete.
-    LATE(pa_operation_unref)(paOperation);
-
-    if (setFailed)
-    {
-        LOG(LS_WARNING) << "could not set microphone volume, error="
-                        << LATE(pa_context_errno)(_paContext);
-        return -1;
-    }
-
-    return 0;
+  return 0;
 }
 
-int32_t
-AudioMixerManagerLinuxPulse::MicrophoneVolume(uint32_t& volume) const
-{
+int32_t AudioMixerManagerLinuxPulse::MaxMicrophoneVolume(
+    uint32_t& maxVolume) const {
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
+  // PA_VOLUME_NORM corresponds to 100% (0db)
+  // PA allows up to 150 db amplification (PA_VOLUME_MAX)
+  // but that doesn't work well for all sound cards
+  maxVolume = static_cast<uint32_t>(PA_VOLUME_NORM);
 
-    uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
-
-    {
-      AutoPulseLock auto_lock(_paMainloop);
-      // Get the actual stream device index if we have a connected stream.
-      // The device used by the stream can be changed during the call.
-      if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream)
-          != PA_STREAM_UNCONNECTED))
-      {
-          deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
-      }
-    }
-
-    if (!GetSourceInfoByIndex(deviceIndex))
-        return -1;
-
-    {
-        AutoPulseLock auto_lock(_paMainloop);
-        volume = static_cast<uint32_t> (_paVolume);
-    }
-
-    LOG(LS_VERBOSE)
-        << "AudioMixerManagerLinuxPulse::MicrophoneVolume() => vol="
-        << volume;
-
-    return 0;
+  return 0;
 }
 
-int32_t
-AudioMixerManagerLinuxPulse::MaxMicrophoneVolume(uint32_t& maxVolume) const
-{
+int32_t AudioMixerManagerLinuxPulse::MinMicrophoneVolume(
+    uint32_t& minVolume) const {
+  if (_paInputDeviceIndex == -1) {
+    LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
 
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
+  minVolume = static_cast<uint32_t>(PA_VOLUME_MUTED);
 
-    // PA_VOLUME_NORM corresponds to 100% (0db)
-    // PA allows up to 150 db amplification (PA_VOLUME_MAX)
-    // but that doesn't work well for all sound cards
-    maxVolume = static_cast<uint32_t> (PA_VOLUME_NORM);
-
-    return 0;
-}
-
-int32_t
-AudioMixerManagerLinuxPulse::MinMicrophoneVolume(uint32_t& minVolume) const
-{
-
-    if (_paInputDeviceIndex == -1)
-    {
-        LOG(LS_WARNING) << "input device index has not been set";
-        return -1;
-    }
-
-    minVolume = static_cast<uint32_t> (PA_VOLUME_MUTED);
-
-    return 0;
+  return 0;
 }
 
 // ===========================================================================
 //                                 Private Methods
 // ===========================================================================
 
-void
-AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context */*c*/,
-                                                const pa_sink_info *i,
-                                                int eol,
-                                                void *pThis)
-{
-    static_cast<AudioMixerManagerLinuxPulse*> (pThis)->
-        PaSinkInfoCallbackHandler(i, eol);
+void AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context* /*c*/,
+                                                     const pa_sink_info* i,
+                                                     int eol,
+                                                     void* pThis) {
+  static_cast<AudioMixerManagerLinuxPulse*>(pThis)->PaSinkInfoCallbackHandler(
+      i, eol);
 }
 
-void
-AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback(
-    pa_context */*c*/,
-    const pa_sink_input_info *i,
+void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback(
+    pa_context* /*c*/,
+    const pa_sink_input_info* i,
     int eol,
-    void *pThis)
-{
-    static_cast<AudioMixerManagerLinuxPulse*> (pThis)->
-        PaSinkInputInfoCallbackHandler(i, eol);
+    void* pThis) {
+  static_cast<AudioMixerManagerLinuxPulse*>(pThis)
+      ->PaSinkInputInfoCallbackHandler(i, eol);
 }
 
-
-void
-AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context */*c*/,
-                                                  const pa_source_info *i,
-                                                  int eol,
-                                                  void *pThis)
-{
-    static_cast<AudioMixerManagerLinuxPulse*> (pThis)->
-        PaSourceInfoCallbackHandler(i, eol);
+void AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context* /*c*/,
+                                                       const pa_source_info* i,
+                                                       int eol,
+                                                       void* pThis) {
+  static_cast<AudioMixerManagerLinuxPulse*>(pThis)->PaSourceInfoCallbackHandler(
+      i, eol);
 }
 
-void
-AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context * c,
-                                                 int success,
-                                                 void */*pThis*/)
-{
-    if (!success)
-    {
-        LOG(LS_ERROR) << "failed to set volume";
-    }
+void AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context* c,
+                                                      int success,
+                                                      void* /*pThis*/) {
+  if (!success) {
+    LOG(LS_ERROR) << "failed to set volume";
+  }
 }
 
 void AudioMixerManagerLinuxPulse::PaSinkInfoCallbackHandler(
-    const pa_sink_info *i,
-    int eol)
-{
-    if (eol)
-    {
-        // Signal that we are done
-        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
-        return;
-    }
+    const pa_sink_info* i,
+    int eol) {
+  if (eol) {
+    // Signal that we are done
+    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+    return;
+  }
 
-    _paChannels = i->channel_map.channels; // Get number of channels
-    pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
-    for (int j = 0; j < _paChannels; ++j)
-    {
-        if (paVolume < i->volume.values[j])
-        {
-            paVolume = i->volume.values[j];
-        }
+  _paChannels = i->channel_map.channels;   // Get number of channels
+  pa_volume_t paVolume = PA_VOLUME_MUTED;  // Minimum possible value.
+  for (int j = 0; j < _paChannels; ++j) {
+    if (paVolume < i->volume.values[j]) {
+      paVolume = i->volume.values[j];
     }
-    _paVolume = paVolume; // get the max volume for any channel
-    _paMute = i->mute; // get mute status
+  }
+  _paVolume = paVolume;  // get the max volume for any channel
+  _paMute = i->mute;     // get mute status
 
-    // supported since PA 0.9.15
-    //_paVolSteps = i->n_volume_steps; // get the number of volume steps
-    // default value is PA_VOLUME_NORM+1
-    _paVolSteps = PA_VOLUME_NORM + 1;
+  // supported since PA 0.9.15
+  //_paVolSteps = i->n_volume_steps; // get the number of volume steps
+  // default value is PA_VOLUME_NORM+1
+  _paVolSteps = PA_VOLUME_NORM + 1;
 }
 
 void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallbackHandler(
-    const pa_sink_input_info *i,
-    int eol)
-{
-    if (eol)
-    {
-        // Signal that we are done
-        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
-        return;
-    }
+    const pa_sink_input_info* i,
+    int eol) {
+  if (eol) {
+    // Signal that we are done
+    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+    return;
+  }
 
-    _paChannels = i->channel_map.channels; // Get number of channels
-    pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
-    for (int j = 0; j < _paChannels; ++j)
-    {
-        if (paVolume < i->volume.values[j])
-        {
-            paVolume = i->volume.values[j];
-        }
+  _paChannels = i->channel_map.channels;   // Get number of channels
+  pa_volume_t paVolume = PA_VOLUME_MUTED;  // Minimum possible value.
+  for (int j = 0; j < _paChannels; ++j) {
+    if (paVolume < i->volume.values[j]) {
+      paVolume = i->volume.values[j];
     }
-    _paVolume = paVolume; // Get the max volume for any channel
-    _paMute = i->mute; // Get mute status
+  }
+  _paVolume = paVolume;  // Get the max volume for any channel
+  _paMute = i->mute;     // Get mute status
 }
 
 void AudioMixerManagerLinuxPulse::PaSourceInfoCallbackHandler(
-    const pa_source_info *i,
-    int eol)
-{
-    if (eol)
-    {
-        // Signal that we are done
-        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
-        return;
-    }
+    const pa_source_info* i,
+    int eol) {
+  if (eol) {
+    // Signal that we are done
+    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+    return;
+  }
 
-    _paChannels = i->channel_map.channels; // Get number of channels
-    pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
-    for (int j = 0; j < _paChannels; ++j)
-    {
-        if (paVolume < i->volume.values[j])
-        {
-            paVolume = i->volume.values[j];
-        }
+  _paChannels = i->channel_map.channels;   // Get number of channels
+  pa_volume_t paVolume = PA_VOLUME_MUTED;  // Minimum possible value.
+  for (int j = 0; j < _paChannels; ++j) {
+    if (paVolume < i->volume.values[j]) {
+      paVolume = i->volume.values[j];
     }
-    _paVolume = paVolume; // Get the max volume for any channel
-    _paMute = i->mute; // Get mute status
+  }
+  _paVolume = paVolume;  // Get the max volume for any channel
+  _paMute = i->mute;     // Get mute status
 
-    // supported since PA 0.9.15
-    //_paVolSteps = i->n_volume_steps; // Get the number of volume steps
-    // default value is PA_VOLUME_NORM+1
-    _paVolSteps = PA_VOLUME_NORM + 1;
+  // supported since PA 0.9.15
+  //_paVolSteps = i->n_volume_steps; // Get the number of volume steps
+  // default value is PA_VOLUME_NORM+1
+  _paVolSteps = PA_VOLUME_NORM + 1;
 }
 
 void AudioMixerManagerLinuxPulse::WaitForOperationCompletion(
-    pa_operation* paOperation) const
-{
-    while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING)
-    {
-        LATE(pa_threaded_mainloop_wait)(_paMainloop);
-    }
+    pa_operation* paOperation) const {
+  while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) {
+    LATE(pa_threaded_mainloop_wait)(_paMainloop);
+  }
 
-    LATE(pa_operation_unref)(paOperation);
+  LATE(pa_operation_unref)(paOperation);
 }
 
 bool AudioMixerManagerLinuxPulse::GetSinkInputInfo() const {
@@ -941,37 +804,33 @@
   AutoPulseLock auto_lock(_paMainloop);
   // Get info for this stream (sink input).
   paOperation = LATE(pa_context_get_sink_input_info)(
-      _paContext,
-      LATE(pa_stream_get_index)(_paPlayStream),
-      PaSinkInputInfoCallback,
-      (void*) this);
+      _paContext, LATE(pa_stream_get_index)(_paPlayStream),
+      PaSinkInputInfoCallback, (void*)this);
 
   WaitForOperationCompletion(paOperation);
   return true;
 }
 
-bool AudioMixerManagerLinuxPulse::GetSinkInfoByIndex(
-    int device_index) const {
+bool AudioMixerManagerLinuxPulse::GetSinkInfoByIndex(int device_index) const {
   pa_operation* paOperation = NULL;
 
   AutoPulseLock auto_lock(_paMainloop);
-  paOperation = LATE(pa_context_get_sink_info_by_index)(_paContext,
-      device_index, PaSinkInfoCallback, (void*) this);
+  paOperation = LATE(pa_context_get_sink_info_by_index)(
+      _paContext, device_index, PaSinkInfoCallback, (void*)this);
 
   WaitForOperationCompletion(paOperation);
   return true;
 }
 
-bool AudioMixerManagerLinuxPulse::GetSourceInfoByIndex(
-    int device_index) const {
+bool AudioMixerManagerLinuxPulse::GetSourceInfoByIndex(int device_index) const {
   pa_operation* paOperation = NULL;
 
   AutoPulseLock auto_lock(_paMainloop);
-  paOperation  = LATE(pa_context_get_source_info_by_index)(
-      _paContext, device_index, PaSourceInfoCallback, (void*) this);
+  paOperation = LATE(pa_context_get_source_info_by_index)(
+      _paContext, device_index, PaSourceInfoCallback, (void*)this);
 
   WaitForOperationCompletion(paOperation);
   return true;
 }
 
-}
+}  // namespace webrtc
diff --git a/modules/audio_device/linux/latebindingsymboltable_linux.cc b/modules/audio_device/linux/latebindingsymboltable_linux.cc
index 35f53fa..7a66c34 100644
--- a/modules/audio_device/linux/latebindingsymboltable_linux.cc
+++ b/modules/audio_device/linux/latebindingsymboltable_linux.cc
@@ -19,9 +19,9 @@
 namespace webrtc {
 namespace adm_linux {
 
-inline static const char *GetDllError() {
+inline static const char* GetDllError() {
 #ifdef WEBRTC_LINUX
-  char *err = dlerror();
+  char* err = dlerror();
   if (err) {
     return err;
   } else {
@@ -64,11 +64,11 @@
 }
 
 static bool LoadSymbol(DllHandle handle,
-                       const char *symbol_name,
-                       void **symbol) {
+                       const char* symbol_name,
+                       void** symbol) {
 #ifdef WEBRTC_LINUX
   *symbol = dlsym(handle, symbol_name);
-  char *err = dlerror();
+  char* err = dlerror();
   if (err) {
     LOG(LS_ERROR) << "Error loading symbol " << symbol_name << " : " << err;
     return false;
@@ -87,8 +87,8 @@
 // caller may later interpret as a valid address.
 bool InternalLoadSymbols(DllHandle handle,
                          int num_symbols,
-                         const char *const symbol_names[],
-                         void *symbols[]) {
+                         const char* const symbol_names[],
+                         void* symbols[]) {
 #ifdef WEBRTC_LINUX
   // Clear any old errors.
   dlerror();
diff --git a/modules/audio_device/mac/audio_device_mac.cc b/modules/audio_device/mac/audio_device_mac.cc
index 27d1cc4..635bd0d 100644
--- a/modules/audio_device/mac/audio_device_mac.cc
+++ b/modules/audio_device/mac/audio_device_mac.cc
@@ -23,34 +23,31 @@
 
 namespace webrtc {
 
-#define WEBRTC_CA_RETURN_ON_ERR(expr)                                  \
+#define WEBRTC_CA_RETURN_ON_ERR(expr)                                \
+  do {                                                               \
+    err = expr;                                                      \
+    if (err != noErr) {                                              \
+      logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+      return -1;                                                     \
+    }                                                                \
+  } while (0)
+
+#define WEBRTC_CA_LOG_ERR(expr)                                      \
+  do {                                                               \
+    err = expr;                                                      \
+    if (err != noErr) {                                              \
+      logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+    }                                                                \
+  } while (0)
+
+#define WEBRTC_CA_LOG_WARN(expr)                                       \
   do {                                                                 \
     err = expr;                                                        \
     if (err != noErr) {                                                \
-      logCAMsg(rtc::LS_ERROR, "Error in " #expr,                       \
-               (const char*) & err);                                   \
-      return -1;                                                       \
+      logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
     }                                                                  \
   } while (0)
 
-#define WEBRTC_CA_LOG_ERR(expr)                                        \
-  do {                                                                 \
-    err = expr;                                                        \
-    if (err != noErr) {                                                \
-      logCAMsg(rtc::LS_ERROR, "Error in " #expr,                       \
-               (const char*) & err);                                   \
-    }                                                                  \
-  } while (0)
-
-#define WEBRTC_CA_LOG_WARN(expr)                                         \
-  do {                                                                   \
-    err = expr;                                                          \
-    if (err != noErr) {                                                  \
-      logCAMsg(rtc::LS_WARNING, "Error in " #expr,                       \
-               (const char*) & err);                                     \
-    }                                                                    \
-  } while (0)
-
 enum { MaxNumberDevices = 64 };
 
 void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) {
@@ -94,7 +91,7 @@
   }
 #else
   // We need to flip the characters in this case.
-   switch (sev) {
+  switch (sev) {
     case rtc::LS_ERROR:
       LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
       break;
@@ -373,8 +370,8 @@
 
   err = AudioHardwareUnload();
   if (err != noErr) {
-    logCAMsg(rtc::LS_ERROR,
-             "Error in AudioHardwareUnload()", (const char*)&err);
+    logCAMsg(rtc::LS_ERROR, "Error in AudioHardwareUnload()",
+             (const char*)&err);
     retVal = -1;
   }
 
@@ -1038,8 +1035,7 @@
       _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat));
 
   if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) {
-    logCAMsg(rtc::LS_ERROR,
-             "Unacceptable output stream format -> mFormatID",
+    logCAMsg(rtc::LS_ERROR, "Unacceptable output stream format -> mFormatID",
              (const char*)&_outStreamFormat.mFormatID);
     return -1;
   }
@@ -1146,8 +1142,7 @@
       _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat));
 
   if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) {
-    logCAMsg(rtc::LS_ERROR,
-             "Unacceptable input stream format -> mFormatID",
+    logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
              (const char*)&_inStreamFormat.mFormatID);
     return -1;
   }
@@ -1348,12 +1343,11 @@
       _critSect.Leave();  // Cannot be under lock, risk of deadlock
       if (kEventTimeout == _stopEventRec.Wait(2000)) {
         rtc::CritScope critScoped(&_critSect);
-        LOG(LS_WARNING)
-            << "Timed out stopping the capture IOProc."
-            << "We may have failed to detect a device removal.";
+        LOG(LS_WARNING) << "Timed out stopping the capture IOProc."
+                        << "We may have failed to detect a device removal.";
         WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
         WEBRTC_CA_LOG_WARN(
-          AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
+            AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
       }
       _critSect.Enter();
       _doStopRec = false;
@@ -1377,9 +1371,8 @@
       _critSect.Leave();  // Cannot be under lock, risk of deadlock
       if (kEventTimeout == _stopEvent.Wait(2000)) {
         rtc::CritScope critScoped(&_critSect);
-        LOG(LS_WARNING)
-            << "Timed out stopping the shared IOProc."
-            << "We may have failed to detect a device removal.";
+        LOG(LS_WARNING) << "Timed out stopping the shared IOProc."
+                        << "We may have failed to detect a device removal.";
         // We assume rendering on a shared device has stopped as well if
         // the IOProc times out.
         WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
@@ -1391,7 +1384,7 @@
       LOG(LS_INFO) << "Recording stopped (shared device)";
     } else if (_recIsInitialized && !_playing && !_playIsInitialized) {
       WEBRTC_CA_LOG_WARN(
-            AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+          AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
       LOG(LS_INFO) << "Recording uninitialized (shared device)";
     }
   }
@@ -1486,9 +1479,8 @@
     _critSect.Leave();  // Cannot be under lock, risk of deadlock
     if (kEventTimeout == _stopEvent.Wait(2000)) {
       rtc::CritScope critScoped(&_critSect);
-      LOG(LS_WARNING)
-          << "Timed out stopping the render IOProc."
-          << "We may have failed to detect a device removal.";
+      LOG(LS_WARNING) << "Timed out stopping the render IOProc."
+                      << "We may have failed to detect a device removal.";
 
       // We assume capturing on a shared device has stopped as well if the
       // IOProc times out.
@@ -1501,11 +1493,11 @@
     LOG(LS_INFO) << "Playout stopped";
   } else if (_twoDevices && _playIsInitialized) {
     WEBRTC_CA_LOG_WARN(
-          AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+        AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
     LOG(LS_INFO) << "Playout uninitialized (output device)";
   } else if (!_twoDevices && _playIsInitialized && !_recIsInitialized) {
     WEBRTC_CA_LOG_WARN(
-          AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+        AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
     LOG(LS_INFO) << "Playout uninitialized (shared device)";
   }
 
@@ -1829,8 +1821,8 @@
     _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
   }
 
-  _renderDelayOffsetSamples = _renderBufSizeSamples -
-                              N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
+  _renderDelayOffsetSamples =
+      _renderBufSizeSamples - N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
                                   _outDesiredFormat.mChannelsPerFrame;
 
   _outDesiredFormat.mBytesPerPacket =
@@ -1909,9 +1901,9 @@
       static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
 
   LOG(LS_VERBOSE) << "initial playout status: _renderDelayOffsetSamples="
-                  << _renderDelayOffsetSamples << ", _renderDelayUs="
-                  << _renderDelayUs << ", _renderLatencyUs="
-                  << _renderLatencyUs;
+                  << _renderDelayOffsetSamples
+                  << ", _renderDelayUs=" << _renderDelayUs
+                  << ", _renderLatencyUs=" << _renderLatencyUs;
   return 0;
 }
 
@@ -1970,8 +1962,8 @@
       AtomicSet32(&_captureDeviceIsAlive, 0);
       _mixerManager.CloseMicrophone();
     } else if (err != noErr) {
-      logCAMsg(rtc::LS_ERROR,
-               "Error in AudioDeviceGetProperty()", (const char*)&err);
+      logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()",
+               (const char*)&err);
       return -1;
     }
   }
@@ -1989,8 +1981,8 @@
       AtomicSet32(&_renderDeviceIsAlive, 0);
       _mixerManager.CloseSpeaker();
     } else if (err != noErr) {
-      logCAMsg(rtc::LS_ERROR,
-               "Error in AudioDeviceGetProperty()", (const char*)&err);
+      logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()",
+               (const char*)&err);
       return -1;
     }
   }
@@ -2016,8 +2008,7 @@
       objectId, &propertyAddress, 0, NULL, &size, &streamFormat));
 
   if (streamFormat.mFormatID != kAudioFormatLinearPCM) {
-    logCAMsg(rtc::LS_ERROR,
-             "Unacceptable input stream format -> mFormatID",
+    logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
              (const char*)&streamFormat.mFormatID);
     return -1;
   }
@@ -2042,8 +2033,7 @@
   LOG(LS_VERBOSE) << "mBytesPerFrame = " << streamFormat.mBytesPerFrame
                   << ", mBitsPerChannel = " << streamFormat.mBitsPerChannel;
   LOG(LS_VERBOSE) << "mFormatFlags = " << streamFormat.mFormatFlags;
-  logCAMsg(rtc::LS_VERBOSE, "mFormatID",
-           (const char*)&streamFormat.mFormatID);
+  logCAMsg(rtc::LS_VERBOSE, "mFormatID", (const char*)&streamFormat.mFormatID);
 
   if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
     const int io_block_size_samples = streamFormat.mChannelsPerFrame *
@@ -2247,8 +2237,8 @@
       LOG(LS_ERROR) << "Error in AudioConverterFillComplexBuffer()";
       return 1;
     } else {
-      logCAMsg(rtc::LS_ERROR,
-               "Error in AudioConverterFillComplexBuffer()", (const char*)&err);
+      logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
+               (const char*)&err);
       return 1;
     }
   }
@@ -2485,8 +2475,8 @@
       // This is our own error.
       return false;
     } else {
-      logCAMsg(rtc::LS_ERROR,
-               "Error in AudioConverterFillComplexBuffer()", (const char*)&err);
+      logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
+               (const char*)&err);
       return false;
     }
   }
diff --git a/modules/audio_device/mac/audio_mixer_manager_mac.cc b/modules/audio_device/mac/audio_mixer_manager_mac.cc
index 14d3f98..928fae7 100644
--- a/modules/audio_device/mac/audio_mixer_manager_mac.cc
+++ b/modules/audio_device/mac/audio_mixer_manager_mac.cc
@@ -14,34 +14,31 @@
 
 namespace webrtc {
 
-#define WEBRTC_CA_RETURN_ON_ERR(expr)                                  \
+#define WEBRTC_CA_RETURN_ON_ERR(expr)                                \
+  do {                                                               \
+    err = expr;                                                      \
+    if (err != noErr) {                                              \
+      logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+      return -1;                                                     \
+    }                                                                \
+  } while (0)
+
+#define WEBRTC_CA_LOG_ERR(expr)                                      \
+  do {                                                               \
+    err = expr;                                                      \
+    if (err != noErr) {                                              \
+      logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+    }                                                                \
+  } while (0)
+
+#define WEBRTC_CA_LOG_WARN(expr)                                       \
   do {                                                                 \
     err = expr;                                                        \
     if (err != noErr) {                                                \
-      logCAMsg(rtc::LS_ERROR, "Error in " #expr,                       \
-               (const char*) & err);                                   \
-      return -1;                                                       \
+      logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
     }                                                                  \
   } while (0)
 
-#define WEBRTC_CA_LOG_ERR(expr)                                        \
-  do {                                                                 \
-    err = expr;                                                        \
-    if (err != noErr) {                                                \
-      logCAMsg(rtc::LS_ERROR, "Error in " #expr,                       \
-               (const char*) & err);                                   \
-    }                                                                  \
-  } while (0)
-
-#define WEBRTC_CA_LOG_WARN(expr)                                         \
-  do {                                                                   \
-    err = expr;                                                          \
-    if (err != noErr) {                                                  \
-      logCAMsg(rtc::LS_WARNING, "Error in " #expr,                       \
-               (const char*) & err);                                     \
-    }                                                                    \
-  } while (0)
-
 AudioMixerManagerMac::AudioMixerManagerMac()
     : _inputDeviceID(kAudioObjectUnknown),
       _outputDeviceID(kAudioObjectUnknown),
@@ -876,8 +873,8 @@
 
 // CoreAudio errors are best interpreted as four character strings.
 void AudioMixerManagerMac::logCAMsg(const rtc::LoggingSeverity sev,
-                              const char* msg,
-                              const char* err) {
+                                    const char* msg,
+                                    const char* err) {
   RTC_DCHECK(msg != NULL);
   RTC_DCHECK(err != NULL);
   RTC_DCHECK(sev == rtc::LS_ERROR || sev == rtc::LS_WARNING);
@@ -895,7 +892,7 @@
   }
 #else
   // We need to flip the characters in this case.
-   switch (sev) {
+  switch (sev) {
     case rtc::LS_ERROR:
       LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
       break;
diff --git a/modules/audio_device/win/audio_device_core_win.cc b/modules/audio_device/win/audio_device_core_win.cc
index 6fcbb6e..32c3f94 100644
--- a/modules/audio_device/win/audio_device_core_win.cc
+++ b/modules/audio_device/win/audio_device_core_win.cc
@@ -8,13 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#pragma warning(disable: 4995)  // name was marked as #pragma deprecated
+#pragma warning(disable : 4995)  // name was marked as #pragma deprecated
 
 #if (_MSC_VER >= 1310) && (_MSC_VER < 1400)
 // Reports the major and minor versions of the compiler.
-// For example, 1310 for Microsoft Visual C++ .NET 2003. 1310 represents version 13 and a 1.0 point release.
-// The Visual C++ 2005 compiler version is 1400.
-// Type cl /? at the command line to see the major and minor versions of your compiler along with the build number.
+// For example, 1310 for Microsoft Visual C++ .NET 2003. 1310 represents version
+// 13 and a 1.0 point release. The Visual C++ 2005 compiler version is 1400.
+// Type cl /? at the command line to see the major and minor versions of your
+// compiler along with the build number.
 #pragma message(">> INFO: Windows Core Audio is not supported in VS 2003")
 #endif
 
@@ -27,13 +28,13 @@
 #include <assert.h>
 #include <string.h>
 
-#include <windows.h>
+#include <Functiondiscoverykeys_devpkey.h>
 #include <comdef.h>
 #include <dmo.h>
-#include <Functiondiscoverykeys_devpkey.h>
 #include <mmsystem.h>
 #include <strsafe.h>
 #include <uuids.h>
+#include <windows.h>
 
 #include <iomanip>
 
@@ -42,25 +43,38 @@
 #include "system_wrappers/include/sleep.h"
 
 // Macro that calls a COM method returning HRESULT value.
-#define EXIT_ON_ERROR(hres)    do { if (FAILED(hres)) goto Exit; } while(0)
+#define EXIT_ON_ERROR(hres) \
+  do {                      \
+    if (FAILED(hres))       \
+      goto Exit;            \
+  } while (0)
 
 // Macro that continues to a COM error.
-#define CONTINUE_ON_ERROR(hres) do { if (FAILED(hres)) goto Next; } while(0)
+#define CONTINUE_ON_ERROR(hres) \
+  do {                          \
+    if (FAILED(hres))           \
+      goto Next;                \
+  } while (0)
 
 // Macro that releases a COM object if not NULL.
-#define SAFE_RELEASE(p)     do { if ((p)) { (p)->Release(); (p) = NULL; } } while(0)
+#define SAFE_RELEASE(p) \
+  do {                  \
+    if ((p)) {          \
+      (p)->Release();   \
+      (p) = NULL;       \
+    }                   \
+  } while (0)
 
-#define ROUND(x) ((x) >=0 ? (int)((x) + 0.5) : (int)((x) - 0.5))
+#define ROUND(x) ((x) >= 0 ? (int)((x) + 0.5) : (int)((x)-0.5))
 
 // REFERENCE_TIME time units per millisecond
-#define REFTIMES_PER_MILLISEC  10000
+#define REFTIMES_PER_MILLISEC 10000
 
-typedef struct tagTHREADNAME_INFO
-{
-   DWORD dwType;        // must be 0x1000
-   LPCSTR szName;       // pointer to name (in user addr space)
-   DWORD dwThreadID;    // thread ID (-1=caller thread)
-   DWORD dwFlags;       // reserved for future use, must be zero
+typedef struct tagTHREADNAME_INFO {
+  DWORD dwType;      // must be 0x1000
+  LPCSTR szName;     // pointer to name (in user addr space)
+  DWORD dwThreadID;  // thread ID (-1=caller thread)
+  DWORD dwFlags;     // reserved for future use, must be zero
 } THREADNAME_INFO;
 
 namespace webrtc {
@@ -68,11 +82,7 @@
 
 enum { COM_THREADING_MODEL = COINIT_MULTITHREADED };
 
-enum
-{
-    kAecCaptureStreamIndex = 0,
-    kAecRenderStreamIndex = 1
-};
+enum { kAecCaptureStreamIndex = 0, kAecRenderStreamIndex = 1 };
 
 // An implementation of IMediaBuffer, as required for
 // IMediaObject::ProcessOutput(). After consuming data provided by
@@ -80,95 +90,75 @@
 //
 // Example implementation:
 // http://msdn.microsoft.com/en-us/library/dd376684(v=vs.85).aspx
-class MediaBufferImpl : public IMediaBuffer
-{
-public:
-    explicit MediaBufferImpl(DWORD maxLength)
-        : _data(new BYTE[maxLength]),
-          _length(0),
-          _maxLength(maxLength),
-          _refCount(0)
-    {}
+class MediaBufferImpl : public IMediaBuffer {
+ public:
+  explicit MediaBufferImpl(DWORD maxLength)
+      : _data(new BYTE[maxLength]),
+        _length(0),
+        _maxLength(maxLength),
+        _refCount(0) {}
 
-    // IMediaBuffer methods.
-    STDMETHOD(GetBufferAndLength(BYTE** ppBuffer, DWORD* pcbLength))
-    {
-        if (!ppBuffer || !pcbLength)
-        {
-            return E_POINTER;
-        }
-
-        *ppBuffer = _data;
-        *pcbLength = _length;
-
-        return S_OK;
+  // IMediaBuffer methods.
+  STDMETHOD(GetBufferAndLength(BYTE** ppBuffer, DWORD* pcbLength)) {
+    if (!ppBuffer || !pcbLength) {
+      return E_POINTER;
     }
 
-    STDMETHOD(GetMaxLength(DWORD* pcbMaxLength))
-    {
-        if (!pcbMaxLength)
-        {
-            return E_POINTER;
-        }
+    *ppBuffer = _data;
+    *pcbLength = _length;
 
-        *pcbMaxLength = _maxLength;
-        return S_OK;
+    return S_OK;
+  }
+
+  STDMETHOD(GetMaxLength(DWORD* pcbMaxLength)) {
+    if (!pcbMaxLength) {
+      return E_POINTER;
     }
 
-    STDMETHOD(SetLength(DWORD cbLength))
-    {
-        if (cbLength > _maxLength)
-        {
-            return E_INVALIDARG;
-        }
+    *pcbMaxLength = _maxLength;
+    return S_OK;
+  }
 
-        _length = cbLength;
-        return S_OK;
+  STDMETHOD(SetLength(DWORD cbLength)) {
+    if (cbLength > _maxLength) {
+      return E_INVALIDARG;
     }
 
-    // IUnknown methods.
-    STDMETHOD_(ULONG, AddRef())
-    {
-        return InterlockedIncrement(&_refCount);
+    _length = cbLength;
+    return S_OK;
+  }
+
+  // IUnknown methods.
+  STDMETHOD_(ULONG, AddRef()) { return InterlockedIncrement(&_refCount); }
+
+  STDMETHOD(QueryInterface(REFIID riid, void** ppv)) {
+    if (!ppv) {
+      return E_POINTER;
+    } else if (riid != IID_IMediaBuffer && riid != IID_IUnknown) {
+      return E_NOINTERFACE;
     }
 
-    STDMETHOD(QueryInterface(REFIID riid, void** ppv))
-    {
-        if (!ppv)
-        {
-            return E_POINTER;
-        }
-        else if (riid != IID_IMediaBuffer && riid != IID_IUnknown)
-        {
-            return E_NOINTERFACE;
-        }
+    *ppv = static_cast<IMediaBuffer*>(this);
+    AddRef();
+    return S_OK;
+  }
 
-        *ppv = static_cast<IMediaBuffer*>(this);
-        AddRef();
-        return S_OK;
+  STDMETHOD_(ULONG, Release()) {
+    LONG refCount = InterlockedDecrement(&_refCount);
+    if (refCount == 0) {
+      delete this;
     }
 
-    STDMETHOD_(ULONG, Release())
-    {
-        LONG refCount = InterlockedDecrement(&_refCount);
-        if (refCount == 0)
-        {
-            delete this;
-        }
+    return refCount;
+  }
 
-        return refCount;
-    }
+ private:
+  ~MediaBufferImpl() { delete[] _data; }
 
-private:
-    ~MediaBufferImpl()
-    {
-        delete [] _data;
-    }
-
-    BYTE* _data;
-    DWORD _length;
-    const DWORD _maxLength;
-    LONG _refCount;
+  BYTE* _data;
+  DWORD _length;
+  const DWORD _maxLength;
+  LONG _refCount;
 };
 }  // namespace
 
@@ -180,228 +170,204 @@
 //  CoreAudioIsSupported
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::CoreAudioIsSupported()
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+bool AudioDeviceWindowsCore::CoreAudioIsSupported() {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    bool MMDeviceIsAvailable(false);
-    bool coreAudioIsSupported(false);
+  bool MMDeviceIsAvailable(false);
+  bool coreAudioIsSupported(false);
 
-    HRESULT hr(S_OK);
-    TCHAR buf[MAXERRORLENGTH];
-    TCHAR errorText[MAXERRORLENGTH];
+  HRESULT hr(S_OK);
+  TCHAR buf[MAXERRORLENGTH];
+  TCHAR errorText[MAXERRORLENGTH];
 
-    // 1) Check if Windows version is Vista SP1 or later.
-    //
-    // CoreAudio is only available on Vista SP1 and later.
-    //
-    OSVERSIONINFOEX osvi;
-    DWORDLONG dwlConditionMask = 0;
-    int op = VER_LESS_EQUAL;
+  // 1) Check if Windows version is Vista SP1 or later.
+  //
+  // CoreAudio is only available on Vista SP1 and later.
+  //
+  OSVERSIONINFOEX osvi;
+  DWORDLONG dwlConditionMask = 0;
+  int op = VER_LESS_EQUAL;
 
-    // Initialize the OSVERSIONINFOEX structure.
-    ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
-    osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
-    osvi.dwMajorVersion = 6;
-    osvi.dwMinorVersion = 0;
-    osvi.wServicePackMajor = 0;
-    osvi.wServicePackMinor = 0;
-    osvi.wProductType = VER_NT_WORKSTATION;
+  // Initialize the OSVERSIONINFOEX structure.
+  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
+  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
+  osvi.dwMajorVersion = 6;
+  osvi.dwMinorVersion = 0;
+  osvi.wServicePackMajor = 0;
+  osvi.wServicePackMinor = 0;
+  osvi.wProductType = VER_NT_WORKSTATION;
 
-    // Initialize the condition mask.
-    VER_SET_CONDITION(dwlConditionMask, VER_MAJORVERSION, op);
-    VER_SET_CONDITION(dwlConditionMask, VER_MINORVERSION, op);
-    VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMAJOR, op);
-    VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMINOR, op);
-    VER_SET_CONDITION(dwlConditionMask, VER_PRODUCT_TYPE, VER_EQUAL);
+  // Initialize the condition mask.
+  VER_SET_CONDITION(dwlConditionMask, VER_MAJORVERSION, op);
+  VER_SET_CONDITION(dwlConditionMask, VER_MINORVERSION, op);
+  VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMAJOR, op);
+  VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMINOR, op);
+  VER_SET_CONDITION(dwlConditionMask, VER_PRODUCT_TYPE, VER_EQUAL);
 
-    DWORD dwTypeMask = VER_MAJORVERSION | VER_MINORVERSION |
-                       VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR |
-                       VER_PRODUCT_TYPE;
+  DWORD dwTypeMask = VER_MAJORVERSION | VER_MINORVERSION |
+                     VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR |
+                     VER_PRODUCT_TYPE;
 
-    // Perform the test.
-    BOOL isVistaRTMorXP = VerifyVersionInfo(&osvi, dwTypeMask,
-                                            dwlConditionMask);
-    if (isVistaRTMorXP != 0)
-    {
-        LOG(LS_VERBOSE)
-            << "*** Windows Core Audio is only supported on Vista SP1 or later"
-            << " => will revert to the Wave API ***";
-        return false;
+  // Perform the test.
+  BOOL isVistaRTMorXP = VerifyVersionInfo(&osvi, dwTypeMask, dwlConditionMask);
+  if (isVistaRTMorXP != 0) {
+    LOG(LS_VERBOSE)
+        << "*** Windows Core Audio is only supported on Vista SP1 or later"
+        << " => will revert to the Wave API ***";
+    return false;
+  }
+
+  // 2) Initializes the COM library for use by the calling thread.
+
+  // The COM init wrapper sets the thread's concurrency model to MTA,
+  // and creates a new apartment for the thread if one is required. The
+  // wrapper also ensures that each call to CoInitializeEx is balanced
+  // by a corresponding call to CoUninitialize.
+  //
+  ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+  if (!comInit.succeeded()) {
+    // Things will work even if an STA thread is calling this method but we
+    // want to ensure that MTA is used and therefore return false here.
+    return false;
+  }
+
+  // 3) Check if the MMDevice API is available.
+  //
+  // The Windows Multimedia Device (MMDevice) API enables audio clients to
+  // discover audio endpoint devices, determine their capabilities, and create
+  // driver instances for those devices.
+  // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
+  // The MMDevice API consists of several interfaces. The first of these is the
+  // IMMDeviceEnumerator interface. To access the interfaces in the MMDevice
+  // API, a client obtains a reference to the IMMDeviceEnumerator interface of a
+  // device-enumerator object by calling the CoCreateInstance function.
+  //
+  // Through the IMMDeviceEnumerator interface, the client can obtain references
+  // to the other interfaces in the MMDevice API. The MMDevice API implements
+  // the following interfaces:
+  //
+  // IMMDevice            Represents an audio device.
+  // IMMDeviceCollection  Represents a collection of audio devices.
+  // IMMDeviceEnumerator  Provides methods for enumerating audio devices.
+  // IMMEndpoint          Represents an audio endpoint device.
+  //
+  IMMDeviceEnumerator* pIMMD(NULL);
+  const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
+  const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
+
+  hr = CoCreateInstance(
+      CLSID_MMDeviceEnumerator,  // GUID value of MMDeviceEnumerator coclass
+      NULL, CLSCTX_ALL,
+      IID_IMMDeviceEnumerator,  // GUID value of the IMMDeviceEnumerator
+                                // interface
+      (void**)&pIMMD);
+
+  if (FAILED(hr)) {
+    LOG(LS_ERROR) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+                  << " Failed to create the required COM object (hr=" << hr
+                  << ")";
+    LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+                    << " CoCreateInstance(MMDeviceEnumerator) failed (hr=" << hr
+                    << ")";
+
+    const DWORD dwFlags =
+        FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+    const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
+
+    // Gets the system's human readable message string for this HRESULT.
+    // All error message in English by default.
+    DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText,
+                                           MAXERRORLENGTH, NULL);
+
+    assert(messageLength <= MAXERRORLENGTH);
+
+    // Trims tailing white space (FormatMessage() leaves a trailing cr-lf.).
+    for (; messageLength && ::isspace(errorText[messageLength - 1]);
+         --messageLength) {
+      errorText[messageLength - 1] = '\0';
     }
 
-    // 2) Initializes the COM library for use by the calling thread.
+    StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
+    StringCchCat(buf, MAXERRORLENGTH, errorText);
+    LOG(LS_VERBOSE) << buf;
+  } else {
+    MMDeviceIsAvailable = true;
+    LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+                    << " CoCreateInstance(MMDeviceEnumerator) succeeded (hr="
+                    << hr << ")";
+    SAFE_RELEASE(pIMMD);
+  }
 
-    // The COM init wrapper sets the thread's concurrency model to MTA,
-    // and creates a new apartment for the thread if one is required. The
-    // wrapper also ensures that each call to CoInitializeEx is balanced
-    // by a corresponding call to CoUninitialize.
-    //
-    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
-    if (!comInit.succeeded()) {
-      // Things will work even if an STA thread is calling this method but we
-      // want to ensure that MTA is used and therefore return false here.
+  // 4) Verify that we can create and initialize our Core Audio class.
+  //
+  // Also, perform a limited "API test" to ensure that Core Audio is supported
+  // for all devices.
+  //
+  if (MMDeviceIsAvailable) {
+    coreAudioIsSupported = false;
+
+    AudioDeviceWindowsCore* p = new AudioDeviceWindowsCore();
+    if (p == NULL) {
       return false;
     }
 
-    // 3) Check if the MMDevice API is available.
-    //
-    // The Windows Multimedia Device (MMDevice) API enables audio clients to
-    // discover audio endpoint devices, determine their capabilities, and create
-    // driver instances for those devices.
-    // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
-    // The MMDevice API consists of several interfaces. The first of these is the
-    // IMMDeviceEnumerator interface. To access the interfaces in the MMDevice API,
-    // a client obtains a reference to the IMMDeviceEnumerator interface of a
-    // device-enumerator object by calling the CoCreateInstance function.
-    //
-    // Through the IMMDeviceEnumerator interface, the client can obtain references
-    // to the other interfaces in the MMDevice API. The MMDevice API implements
-    // the following interfaces:
-    //
-    // IMMDevice            Represents an audio device.
-    // IMMDeviceCollection  Represents a collection of audio devices.
-    // IMMDeviceEnumerator  Provides methods for enumerating audio devices.
-    // IMMEndpoint          Represents an audio endpoint device.
-    //
-    IMMDeviceEnumerator* pIMMD(NULL);
-    const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
-    const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
+    int ok(0);
+    int temp_ok(0);
+    bool available(false);
 
-    hr = CoCreateInstance(
-            CLSID_MMDeviceEnumerator,   // GUID value of MMDeviceEnumerator coclass
-            NULL,
-            CLSCTX_ALL,
-            IID_IMMDeviceEnumerator,    // GUID value of the IMMDeviceEnumerator interface
-            (void**)&pIMMD );
-
-    if (FAILED(hr))
-    {
-        LOG(LS_ERROR) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
-                      << " Failed to create the required COM object (hr="
-                      << hr << ")";
-        LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
-                        << " CoCreateInstance(MMDeviceEnumerator) failed (hr="
-                        << hr << ")";
-
-        const DWORD dwFlags = FORMAT_MESSAGE_FROM_SYSTEM |
-                              FORMAT_MESSAGE_IGNORE_INSERTS;
-        const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
-
-        // Gets the system's human readable message string for this HRESULT.
-        // All error message in English by default.
-        DWORD messageLength = ::FormatMessageW(dwFlags,
-                                               0,
-                                               hr,
-                                               dwLangID,
-                                               errorText,
-                                               MAXERRORLENGTH,
-                                               NULL);
-
-        assert(messageLength <= MAXERRORLENGTH);
-
-        // Trims tailing white space (FormatMessage() leaves a trailing cr-lf.).
-        for (; messageLength && ::isspace(errorText[messageLength - 1]);
-             --messageLength)
-        {
-            errorText[messageLength - 1] = '\0';
-        }
-
-        StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
-        StringCchCat(buf, MAXERRORLENGTH, errorText);
-        LOG(LS_VERBOSE) << buf;
-    }
-    else
-    {
-        MMDeviceIsAvailable = true;
-        LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
-            << " CoCreateInstance(MMDeviceEnumerator) succeeded (hr=" << hr
-            << ")";
-        SAFE_RELEASE(pIMMD);
+    if (p->Init() != InitStatus::OK) {
+      ok |= -1;
     }
 
-    // 4) Verify that we can create and initialize our Core Audio class.
-    //
-    // Also, perform a limited "API test" to ensure that Core Audio is supported for all devices.
-    //
-    if (MMDeviceIsAvailable)
-    {
-        coreAudioIsSupported = false;
-
-        AudioDeviceWindowsCore* p = new AudioDeviceWindowsCore();
-        if (p == NULL)
-        {
-            return false;
-        }
-
-        int ok(0);
-        int temp_ok(0);
-        bool available(false);
-
-        if (p->Init() != InitStatus::OK) {
-          ok |= -1;
-        }
-
-        int16_t numDevsRec = p->RecordingDevices();
-        for (uint16_t i = 0; i < numDevsRec; i++)
-        {
-            ok |= p->SetRecordingDevice(i);
-            temp_ok = p->RecordingIsAvailable(available);
-            ok |= temp_ok;
-            ok |= (available == false);
-            if (available)
-            {
-                ok |= p->InitMicrophone();
-            }
-            if (ok)
-            {
-                LOG(LS_WARNING)
-                    << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
-                    << " Failed to use Core Audio Recording for device id="
-                    << i;
-            }
-        }
-
-        int16_t numDevsPlay = p->PlayoutDevices();
-        for (uint16_t i = 0; i < numDevsPlay; i++)
-        {
-            ok |= p->SetPlayoutDevice(i);
-            temp_ok = p->PlayoutIsAvailable(available);
-            ok |= temp_ok;
-            ok |= (available == false);
-            if (available)
-            {
-                ok |= p->InitSpeaker();
-            }
-            if (ok)
-            {
-                LOG(LS_WARNING)
-                    << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
-                    << " Failed to use Core Audio Playout for device id=" << i;
-            }
-        }
-
-        ok |= p->Terminate();
-
-        if (ok == 0)
-        {
-            coreAudioIsSupported = true;
-        }
-
-        delete p;
+    int16_t numDevsRec = p->RecordingDevices();
+    for (uint16_t i = 0; i < numDevsRec; i++) {
+      ok |= p->SetRecordingDevice(i);
+      temp_ok = p->RecordingIsAvailable(available);
+      ok |= temp_ok;
+      ok |= (available == false);
+      if (available) {
+        ok |= p->InitMicrophone();
+      }
+      if (ok) {
+        LOG(LS_WARNING) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+                        << " Failed to use Core Audio Recording for device id="
+                        << i;
+      }
     }
 
-    if (coreAudioIsSupported)
-    {
-        LOG(LS_VERBOSE) << "*** Windows Core Audio is supported ***";
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "*** Windows Core Audio is NOT supported"
-                        << " => will revert to the Wave API ***";
+    int16_t numDevsPlay = p->PlayoutDevices();
+    for (uint16_t i = 0; i < numDevsPlay; i++) {
+      ok |= p->SetPlayoutDevice(i);
+      temp_ok = p->PlayoutIsAvailable(available);
+      ok |= temp_ok;
+      ok |= (available == false);
+      if (available) {
+        ok |= p->InitSpeaker();
+      }
+      if (ok) {
+        LOG(LS_WARNING) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+                        << " Failed to use Core Audio Playout for device id="
+                        << i;
+      }
     }
 
-    return (coreAudioIsSupported);
+    ok |= p->Terminate();
+
+    if (ok == 0) {
+      coreAudioIsSupported = true;
+    }
+
+    delete p;
+  }
+
+  if (coreAudioIsSupported) {
+    LOG(LS_VERBOSE) << "*** Windows Core Audio is supported ***";
+  } else {
+    LOG(LS_VERBOSE) << "*** Windows Core Audio is NOT supported"
+                    << " => will revert to the Wave API ***";
+  }
+
+  return (coreAudioIsSupported);
 }
 
 // ============================================================================
@@ -566,76 +532,62 @@
 //  AudioDeviceWindowsCore() - dtor
 // ----------------------------------------------------------------------------
 
-AudioDeviceWindowsCore::~AudioDeviceWindowsCore()
-{
-    LOG(LS_INFO) << __FUNCTION__ << " destroyed";
+AudioDeviceWindowsCore::~AudioDeviceWindowsCore() {
+  LOG(LS_INFO) << __FUNCTION__ << " destroyed";
 
-    Terminate();
+  Terminate();
 
-    // The IMMDeviceEnumerator is created during construction. Must release
-    // it here and not in Terminate() since we don't recreate it in Init().
-    SAFE_RELEASE(_ptrEnumerator);
+  // The IMMDeviceEnumerator is created during construction. Must release
+  // it here and not in Terminate() since we don't recreate it in Init().
+  SAFE_RELEASE(_ptrEnumerator);
 
-    _ptrAudioBuffer = NULL;
+  _ptrAudioBuffer = NULL;
 
-    if (NULL != _hRenderSamplesReadyEvent)
-    {
-        CloseHandle(_hRenderSamplesReadyEvent);
-        _hRenderSamplesReadyEvent = NULL;
+  if (NULL != _hRenderSamplesReadyEvent) {
+    CloseHandle(_hRenderSamplesReadyEvent);
+    _hRenderSamplesReadyEvent = NULL;
+  }
+
+  if (NULL != _hCaptureSamplesReadyEvent) {
+    CloseHandle(_hCaptureSamplesReadyEvent);
+    _hCaptureSamplesReadyEvent = NULL;
+  }
+
+  if (NULL != _hRenderStartedEvent) {
+    CloseHandle(_hRenderStartedEvent);
+    _hRenderStartedEvent = NULL;
+  }
+
+  if (NULL != _hCaptureStartedEvent) {
+    CloseHandle(_hCaptureStartedEvent);
+    _hCaptureStartedEvent = NULL;
+  }
+
+  if (NULL != _hShutdownRenderEvent) {
+    CloseHandle(_hShutdownRenderEvent);
+    _hShutdownRenderEvent = NULL;
+  }
+
+  if (NULL != _hShutdownCaptureEvent) {
+    CloseHandle(_hShutdownCaptureEvent);
+    _hShutdownCaptureEvent = NULL;
+  }
+
+  if (NULL != _hSetCaptureVolumeEvent) {
+    CloseHandle(_hSetCaptureVolumeEvent);
+    _hSetCaptureVolumeEvent = NULL;
+  }
+
+  if (_avrtLibrary) {
+    BOOL freeOK = FreeLibrary(_avrtLibrary);
+    if (!freeOK) {
+      LOG(LS_WARNING) << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
+                      << " failed to free the loaded Avrt DLL module correctly";
+    } else {
+      LOG(LS_WARNING) << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
+                      << " the Avrt DLL module is now unloaded";
     }
-
-    if (NULL != _hCaptureSamplesReadyEvent)
-    {
-        CloseHandle(_hCaptureSamplesReadyEvent);
-        _hCaptureSamplesReadyEvent = NULL;
-    }
-
-    if (NULL != _hRenderStartedEvent)
-    {
-        CloseHandle(_hRenderStartedEvent);
-        _hRenderStartedEvent = NULL;
-    }
-
-    if (NULL != _hCaptureStartedEvent)
-    {
-        CloseHandle(_hCaptureStartedEvent);
-        _hCaptureStartedEvent = NULL;
-    }
-
-    if (NULL != _hShutdownRenderEvent)
-    {
-        CloseHandle(_hShutdownRenderEvent);
-        _hShutdownRenderEvent = NULL;
-    }
-
-    if (NULL != _hShutdownCaptureEvent)
-    {
-        CloseHandle(_hShutdownCaptureEvent);
-        _hShutdownCaptureEvent = NULL;
-    }
-
-    if (NULL != _hSetCaptureVolumeEvent)
-    {
-        CloseHandle(_hSetCaptureVolumeEvent);
-        _hSetCaptureVolumeEvent = NULL;
-    }
-
-    if (_avrtLibrary)
-    {
-        BOOL freeOK = FreeLibrary(_avrtLibrary);
-        if (!freeOK)
-        {
-            LOG(LS_WARNING)
-                << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
-                << " failed to free the loaded Avrt DLL module correctly";
-        }
-        else
-        {
-            LOG(LS_WARNING)
-                << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
-                << " the Avrt DLL module is now unloaded";
-        }
-    }
+  }
 }
 
 // ============================================================================
@@ -646,28 +598,26 @@
 //  AttachAudioBuffer
 // ----------------------------------------------------------------------------
 
-void AudioDeviceWindowsCore::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
-{
+void AudioDeviceWindowsCore::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  _ptrAudioBuffer = audioBuffer;
 
-    _ptrAudioBuffer = audioBuffer;
-
-    // Inform the AudioBuffer about default settings for this implementation.
-    // Set all values to zero here since the actual settings will be done by
-    // InitPlayout and InitRecording later.
-    _ptrAudioBuffer->SetRecordingSampleRate(0);
-    _ptrAudioBuffer->SetPlayoutSampleRate(0);
-    _ptrAudioBuffer->SetRecordingChannels(0);
-    _ptrAudioBuffer->SetPlayoutChannels(0);
+  // Inform the AudioBuffer about default settings for this implementation.
+  // Set all values to zero here since the actual settings will be done by
+  // InitPlayout and InitRecording later.
+  _ptrAudioBuffer->SetRecordingSampleRate(0);
+  _ptrAudioBuffer->SetPlayoutSampleRate(0);
+  _ptrAudioBuffer->SetRecordingChannels(0);
+  _ptrAudioBuffer->SetPlayoutChannels(0);
 }
 
 // ----------------------------------------------------------------------------
 //  ActiveAudioLayer
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const
-{
-    audioLayer = AudioDeviceModule::kWindowsCoreAudio;
-    return 0;
+int32_t AudioDeviceWindowsCore::ActiveAudioLayer(
+    AudioDeviceModule::AudioLayer& audioLayer) const {
+  audioLayer = AudioDeviceModule::kWindowsCoreAudio;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -696,339 +646,297 @@
 //  Terminate
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::Terminate()
-{
+int32_t AudioDeviceWindowsCore::Terminate() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
-
-    if (!_initialized) {
-        return 0;
-    }
-
-    _initialized = false;
-    _speakerIsInitialized = false;
-    _microphoneIsInitialized = false;
-    _playing = false;
-    _recording = false;
-
-    SAFE_RELEASE(_ptrRenderCollection);
-    SAFE_RELEASE(_ptrCaptureCollection);
-    SAFE_RELEASE(_ptrDeviceOut);
-    SAFE_RELEASE(_ptrDeviceIn);
-    SAFE_RELEASE(_ptrClientOut);
-    SAFE_RELEASE(_ptrClientIn);
-    SAFE_RELEASE(_ptrRenderClient);
-    SAFE_RELEASE(_ptrCaptureClient);
-    SAFE_RELEASE(_ptrCaptureVolume);
-    SAFE_RELEASE(_ptrRenderSimpleVolume);
-
+  if (!_initialized) {
     return 0;
+  }
+
+  _initialized = false;
+  _speakerIsInitialized = false;
+  _microphoneIsInitialized = false;
+  _playing = false;
+  _recording = false;
+
+  SAFE_RELEASE(_ptrRenderCollection);
+  SAFE_RELEASE(_ptrCaptureCollection);
+  SAFE_RELEASE(_ptrDeviceOut);
+  SAFE_RELEASE(_ptrDeviceIn);
+  SAFE_RELEASE(_ptrClientOut);
+  SAFE_RELEASE(_ptrClientIn);
+  SAFE_RELEASE(_ptrRenderClient);
+  SAFE_RELEASE(_ptrCaptureClient);
+  SAFE_RELEASE(_ptrCaptureVolume);
+  SAFE_RELEASE(_ptrRenderSimpleVolume);
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  Initialized
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::Initialized() const
-{
-    return (_initialized);
+bool AudioDeviceWindowsCore::Initialized() const {
+  return (_initialized);
 }
 
 // ----------------------------------------------------------------------------
 //  InitSpeaker
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::InitSpeaker()
-{
+int32_t AudioDeviceWindowsCore::InitSpeaker() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_playing) {
+    return -1;
+  }
 
-    if (_playing)
-    {
-        return -1;
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  if (_usingOutputDeviceIndex) {
+    int16_t nDevices = PlayoutDevices();
+    if (_outputDeviceIndex > (nDevices - 1)) {
+      LOG(LS_ERROR) << "current device selection is invalid => unable to"
+                    << " initialize";
+      return -1;
     }
+  }
 
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
+  int32_t ret(0);
 
-    if (_usingOutputDeviceIndex)
-    {
-        int16_t nDevices = PlayoutDevices();
-        if (_outputDeviceIndex > (nDevices - 1))
-        {
-            LOG(LS_ERROR) << "current device selection is invalid => unable to"
-                          << " initialize";
-            return -1;
-        }
-    }
+  SAFE_RELEASE(_ptrDeviceOut);
+  if (_usingOutputDeviceIndex) {
+    // Refresh the selected rendering endpoint device using current index
+    ret = _GetListDevice(eRender, _outputDeviceIndex, &_ptrDeviceOut);
+  } else {
+    ERole role;
+    (_outputDevice == AudioDeviceModule::kDefaultDevice)
+        ? role = eConsole
+        : role = eCommunications;
+    // Refresh the selected rendering endpoint device using role
+    ret = _GetDefaultDevice(eRender, role, &_ptrDeviceOut);
+  }
 
-    int32_t ret(0);
-
+  if (ret != 0 || (_ptrDeviceOut == NULL)) {
+    LOG(LS_ERROR) << "failed to initialize the rendering enpoint device";
     SAFE_RELEASE(_ptrDeviceOut);
-    if (_usingOutputDeviceIndex)
-    {
-        // Refresh the selected rendering endpoint device using current index
-        ret = _GetListDevice(eRender, _outputDeviceIndex, &_ptrDeviceOut);
-    }
-    else
-    {
-        ERole role;
-        (_outputDevice == AudioDeviceModule::kDefaultDevice) ? role = eConsole : role = eCommunications;
-        // Refresh the selected rendering endpoint device using role
-        ret = _GetDefaultDevice(eRender, role, &_ptrDeviceOut);
-    }
+    return -1;
+  }
 
-    if (ret != 0 || (_ptrDeviceOut == NULL))
-    {
-        LOG(LS_ERROR) << "failed to initialize the rendering enpoint device";
-        SAFE_RELEASE(_ptrDeviceOut);
-        return -1;
-    }
-
-    IAudioSessionManager* pManager = NULL;
-    ret = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager),
-                                  CLSCTX_ALL,
-                                  NULL,
-                                  (void**)&pManager);
-    if (ret != 0 || pManager == NULL)
-    {
-        LOG(LS_ERROR) << "failed to initialize the render manager";
-        SAFE_RELEASE(pManager);
-        return -1;
-    }
-
-    SAFE_RELEASE(_ptrRenderSimpleVolume);
-    ret = pManager->GetSimpleAudioVolume(NULL, FALSE, &_ptrRenderSimpleVolume);
-    if (ret != 0 || _ptrRenderSimpleVolume == NULL)
-    {
-        LOG(LS_ERROR) << "failed to initialize the render simple volume";
-        SAFE_RELEASE(pManager);
-        SAFE_RELEASE(_ptrRenderSimpleVolume);
-        return -1;
-    }
+  IAudioSessionManager* pManager = NULL;
+  ret = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL,
+                                NULL, (void**)&pManager);
+  if (ret != 0 || pManager == NULL) {
+    LOG(LS_ERROR) << "failed to initialize the render manager";
     SAFE_RELEASE(pManager);
+    return -1;
+  }
 
-    _speakerIsInitialized = true;
+  SAFE_RELEASE(_ptrRenderSimpleVolume);
+  ret = pManager->GetSimpleAudioVolume(NULL, FALSE, &_ptrRenderSimpleVolume);
+  if (ret != 0 || _ptrRenderSimpleVolume == NULL) {
+    LOG(LS_ERROR) << "failed to initialize the render simple volume";
+    SAFE_RELEASE(pManager);
+    SAFE_RELEASE(_ptrRenderSimpleVolume);
+    return -1;
+  }
+  SAFE_RELEASE(pManager);
 
-    return 0;
+  _speakerIsInitialized = true;
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  InitMicrophone
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::InitMicrophone()
-{
+int32_t AudioDeviceWindowsCore::InitMicrophone() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_recording) {
+    return -1;
+  }
 
-    if (_recording)
-    {
-        return -1;
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
+
+  if (_usingInputDeviceIndex) {
+    int16_t nDevices = RecordingDevices();
+    if (_inputDeviceIndex > (nDevices - 1)) {
+      LOG(LS_ERROR) << "current device selection is invalid => unable to"
+                    << " initialize";
+      return -1;
     }
+  }
 
-    if (_ptrDeviceIn == NULL)
-    {
-        return -1;
-    }
+  int32_t ret(0);
 
-    if (_usingInputDeviceIndex)
-    {
-        int16_t nDevices = RecordingDevices();
-        if (_inputDeviceIndex > (nDevices - 1))
-        {
-            LOG(LS_ERROR) << "current device selection is invalid => unable to"
-                          << " initialize";
-            return -1;
-        }
-    }
+  SAFE_RELEASE(_ptrDeviceIn);
+  if (_usingInputDeviceIndex) {
+    // Refresh the selected capture endpoint device using current index
+    ret = _GetListDevice(eCapture, _inputDeviceIndex, &_ptrDeviceIn);
+  } else {
+    ERole role;
+    (_inputDevice == AudioDeviceModule::kDefaultDevice)
+        ? role = eConsole
+        : role = eCommunications;
+    // Refresh the selected capture endpoint device using role
+    ret = _GetDefaultDevice(eCapture, role, &_ptrDeviceIn);
+  }
 
-    int32_t ret(0);
-
+  if (ret != 0 || (_ptrDeviceIn == NULL)) {
+    LOG(LS_ERROR) << "failed to initialize the capturing enpoint device";
     SAFE_RELEASE(_ptrDeviceIn);
-    if (_usingInputDeviceIndex)
-    {
-        // Refresh the selected capture endpoint device using current index
-        ret = _GetListDevice(eCapture, _inputDeviceIndex, &_ptrDeviceIn);
-    }
-    else
-    {
-        ERole role;
-        (_inputDevice == AudioDeviceModule::kDefaultDevice) ? role = eConsole : role = eCommunications;
-        // Refresh the selected capture endpoint device using role
-        ret = _GetDefaultDevice(eCapture, role, &_ptrDeviceIn);
-    }
+    return -1;
+  }
 
-    if (ret != 0 || (_ptrDeviceIn == NULL))
-    {
-        LOG(LS_ERROR) << "failed to initialize the capturing enpoint device";
-        SAFE_RELEASE(_ptrDeviceIn);
-        return -1;
-    }
-
+  SAFE_RELEASE(_ptrCaptureVolume);
+  ret = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                               reinterpret_cast<void**>(&_ptrCaptureVolume));
+  if (ret != 0 || _ptrCaptureVolume == NULL) {
+    LOG(LS_ERROR) << "failed to initialize the capture volume";
     SAFE_RELEASE(_ptrCaptureVolume);
-    ret = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume),
-                                 CLSCTX_ALL,
-                                 NULL,
-                                 reinterpret_cast<void **>(&_ptrCaptureVolume));
-    if (ret != 0 || _ptrCaptureVolume == NULL)
-    {
-        LOG(LS_ERROR) << "failed to initialize the capture volume";
-        SAFE_RELEASE(_ptrCaptureVolume);
-        return -1;
-    }
+    return -1;
+  }
 
-    _microphoneIsInitialized = true;
+  _microphoneIsInitialized = true;
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SpeakerIsInitialized
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::SpeakerIsInitialized() const
-{
-
-    return (_speakerIsInitialized);
+bool AudioDeviceWindowsCore::SpeakerIsInitialized() const {
+  return (_speakerIsInitialized);
 }
 
 // ----------------------------------------------------------------------------
 //  MicrophoneIsInitialized
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::MicrophoneIsInitialized() const
-{
-
-    return (_microphoneIsInitialized);
+bool AudioDeviceWindowsCore::MicrophoneIsInitialized() const {
+  return (_microphoneIsInitialized);
 }
 
 // ----------------------------------------------------------------------------
 //  SpeakerVolumeIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SpeakerVolumeIsAvailable(bool& available)
-{
+int32_t AudioDeviceWindowsCore::SpeakerVolumeIsAvailable(bool& available) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioSessionManager* pManager = NULL;
+  ISimpleAudioVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioSessionManager* pManager = NULL;
-    ISimpleAudioVolume* pVolume = NULL;
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL, NULL,
+                               (void**)&pManager);
+  EXIT_ON_ERROR(hr);
 
-    hr = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL, NULL, (void**)&pManager);
-    EXIT_ON_ERROR(hr);
+  hr = pManager->GetSimpleAudioVolume(NULL, FALSE, &pVolume);
+  EXIT_ON_ERROR(hr);
 
-    hr = pManager->GetSimpleAudioVolume(NULL, FALSE, &pVolume);
-    EXIT_ON_ERROR(hr);
+  float volume(0.0f);
+  hr = pVolume->GetMasterVolume(&volume);
+  if (FAILED(hr)) {
+    available = false;
+  }
+  available = true;
 
-    float volume(0.0f);
-    hr = pVolume->GetMasterVolume(&volume);
-    if (FAILED(hr))
-    {
-        available = false;
-    }
-    available = true;
+  SAFE_RELEASE(pManager);
+  SAFE_RELEASE(pVolume);
 
-    SAFE_RELEASE(pManager);
-    SAFE_RELEASE(pVolume);
-
-    return 0;
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pManager);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pManager);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SetSpeakerVolume
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetSpeakerVolume(uint32_t volume)
-{
+int32_t AudioDeviceWindowsCore::SetSpeakerVolume(uint32_t volume) {
+  {
+    rtc::CritScope lock(&_critSect);
 
-    {
-        rtc::CritScope lock(&_critSect);
-
-        if (!_speakerIsInitialized)
-        {
-        return -1;
-        }
-
-        if (_ptrDeviceOut == NULL)
-        {
-            return -1;
-        }
+    if (!_speakerIsInitialized) {
+      return -1;
     }
 
-    if (volume < (uint32_t)MIN_CORE_SPEAKER_VOLUME ||
-        volume > (uint32_t)MAX_CORE_SPEAKER_VOLUME)
-    {
-        return -1;
+    if (_ptrDeviceOut == NULL) {
+      return -1;
     }
+  }
 
-    HRESULT hr = S_OK;
+  if (volume < (uint32_t)MIN_CORE_SPEAKER_VOLUME ||
+      volume > (uint32_t)MAX_CORE_SPEAKER_VOLUME) {
+    return -1;
+  }
 
-    // scale input volume to valid range (0.0 to 1.0)
-    const float fLevel = (float)volume/MAX_CORE_SPEAKER_VOLUME;
-    _volumeMutex.Enter();
-    hr = _ptrRenderSimpleVolume->SetMasterVolume(fLevel,NULL);
-    _volumeMutex.Leave();
-    EXIT_ON_ERROR(hr);
+  HRESULT hr = S_OK;
 
-    return 0;
+  // scale input volume to valid range (0.0 to 1.0)
+  const float fLevel = (float)volume / MAX_CORE_SPEAKER_VOLUME;
+  _volumeMutex.Enter();
+  hr = _ptrRenderSimpleVolume->SetMasterVolume(fLevel, NULL);
+  _volumeMutex.Leave();
+  EXIT_ON_ERROR(hr);
+
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    return -1;
+  _TraceCOMError(hr);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SpeakerVolume
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SpeakerVolume(uint32_t& volume) const
-{
+int32_t AudioDeviceWindowsCore::SpeakerVolume(uint32_t& volume) const {
+  {
+    rtc::CritScope lock(&_critSect);
 
-    {
-        rtc::CritScope lock(&_critSect);
-
-        if (!_speakerIsInitialized)
-        {
-            return -1;
-        }
-
-        if (_ptrDeviceOut == NULL)
-        {
-            return -1;
-        }
+    if (!_speakerIsInitialized) {
+      return -1;
     }
 
-    HRESULT hr = S_OK;
-    float fLevel(0.0f);
+    if (_ptrDeviceOut == NULL) {
+      return -1;
+    }
+  }
 
-    _volumeMutex.Enter();
-    hr = _ptrRenderSimpleVolume->GetMasterVolume(&fLevel);
-    _volumeMutex.Leave();
-    EXIT_ON_ERROR(hr);
+  HRESULT hr = S_OK;
+  float fLevel(0.0f);
 
-    // scale input volume range [0.0,1.0] to valid output range
-    volume = static_cast<uint32_t> (fLevel*MAX_CORE_SPEAKER_VOLUME);
+  _volumeMutex.Enter();
+  hr = _ptrRenderSimpleVolume->GetMasterVolume(&fLevel);
+  _volumeMutex.Leave();
+  EXIT_ON_ERROR(hr);
 
-    return 0;
+  // scale input volume range [0.0,1.0] to valid output range
+  volume = static_cast<uint32_t>(fLevel * MAX_CORE_SPEAKER_VOLUME);
+
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    return -1;
+  _TraceCOMError(hr);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
@@ -1040,503 +948,451 @@
 //  how it is used today in VoE.
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MaxSpeakerVolume(uint32_t& maxVolume) const
-{
+int32_t AudioDeviceWindowsCore::MaxSpeakerVolume(uint32_t& maxVolume) const {
+  if (!_speakerIsInitialized) {
+    return -1;
+  }
 
-    if (!_speakerIsInitialized)
-    {
-        return -1;
-    }
+  maxVolume = static_cast<uint32_t>(MAX_CORE_SPEAKER_VOLUME);
 
-    maxVolume = static_cast<uint32_t> (MAX_CORE_SPEAKER_VOLUME);
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  MinSpeakerVolume
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MinSpeakerVolume(uint32_t& minVolume) const
-{
+int32_t AudioDeviceWindowsCore::MinSpeakerVolume(uint32_t& minVolume) const {
+  if (!_speakerIsInitialized) {
+    return -1;
+  }
 
-    if (!_speakerIsInitialized)
-    {
-        return -1;
-    }
+  minVolume = static_cast<uint32_t>(MIN_CORE_SPEAKER_VOLUME);
 
-    minVolume = static_cast<uint32_t> (MIN_CORE_SPEAKER_VOLUME);
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SpeakerMuteIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SpeakerMuteIsAvailable(bool& available)
-{
+int32_t AudioDeviceWindowsCore::SpeakerMuteIsAvailable(bool& available) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  // Query the speaker system mute state.
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                               reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    // Query the speaker system mute state.
-    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume),
-        CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  BOOL mute;
+  hr = pVolume->GetMute(&mute);
+  if (FAILED(hr))
+    available = false;
+  else
+    available = true;
 
-    BOOL mute;
-    hr = pVolume->GetMute(&mute);
-    if (FAILED(hr))
-        available = false;
-    else
-        available = true;
+  SAFE_RELEASE(pVolume);
 
-    SAFE_RELEASE(pVolume);
-
-    return 0;
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SetSpeakerMute
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetSpeakerMute(bool enable)
-{
+int32_t AudioDeviceWindowsCore::SetSpeakerMute(bool enable) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (!_speakerIsInitialized) {
+    return -1;
+  }
 
-    if (!_speakerIsInitialized)
-    {
-        return -1;
-    }
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  // Set the speaker system mute state.
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                               reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    // Set the speaker system mute state.
-    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  const BOOL mute(enable);
+  hr = pVolume->SetMute(mute, NULL);
+  EXIT_ON_ERROR(hr);
 
-    const BOOL mute(enable);
-    hr = pVolume->SetMute(mute, NULL);
-    EXIT_ON_ERROR(hr);
+  SAFE_RELEASE(pVolume);
 
-    SAFE_RELEASE(pVolume);
-
-    return 0;
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SpeakerMute
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SpeakerMute(bool& enabled) const
-{
+int32_t AudioDeviceWindowsCore::SpeakerMute(bool& enabled) const {
+  if (!_speakerIsInitialized) {
+    return -1;
+  }
 
-    if (!_speakerIsInitialized)
-    {
-        return -1;
-    }
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  // Query the speaker system mute state.
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                               reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    // Query the speaker system mute state.
-    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  BOOL mute;
+  hr = pVolume->GetMute(&mute);
+  EXIT_ON_ERROR(hr);
 
-    BOOL mute;
-    hr = pVolume->GetMute(&mute);
-    EXIT_ON_ERROR(hr);
+  enabled = (mute == TRUE) ? true : false;
 
-    enabled = (mute == TRUE) ? true : false;
+  SAFE_RELEASE(pVolume);
 
-    SAFE_RELEASE(pVolume);
-
-    return 0;
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  MicrophoneMuteIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MicrophoneMuteIsAvailable(bool& available)
-{
+int32_t AudioDeviceWindowsCore::MicrophoneMuteIsAvailable(bool& available) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceIn == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  // Query the microphone system mute state.
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                              reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    // Query the microphone system mute state.
-    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  BOOL mute;
+  hr = pVolume->GetMute(&mute);
+  if (FAILED(hr))
+    available = false;
+  else
+    available = true;
 
-    BOOL mute;
-    hr = pVolume->GetMute(&mute);
-    if (FAILED(hr))
-        available = false;
-    else
-        available = true;
-
-    SAFE_RELEASE(pVolume);
-    return 0;
+  SAFE_RELEASE(pVolume);
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SetMicrophoneMute
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetMicrophoneMute(bool enable)
-{
+int32_t AudioDeviceWindowsCore::SetMicrophoneMute(bool enable) {
+  if (!_microphoneIsInitialized) {
+    return -1;
+  }
 
-    if (!_microphoneIsInitialized)
-    {
-        return -1;
-    }
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceIn == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  // Set the microphone system mute state.
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                              reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    // Set the microphone system mute state.
-    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  const BOOL mute(enable);
+  hr = pVolume->SetMute(mute, NULL);
+  EXIT_ON_ERROR(hr);
 
-    const BOOL mute(enable);
-    hr = pVolume->SetMute(mute, NULL);
-    EXIT_ON_ERROR(hr);
-
-    SAFE_RELEASE(pVolume);
-    return 0;
+  SAFE_RELEASE(pVolume);
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  MicrophoneMute
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MicrophoneMute(bool& enabled) const
-{
+int32_t AudioDeviceWindowsCore::MicrophoneMute(bool& enabled) const {
+  if (!_microphoneIsInitialized) {
+    return -1;
+  }
 
-    if (!_microphoneIsInitialized)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  // Query the microphone system mute state.
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                              reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    // Query the microphone system mute state.
-    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  BOOL mute;
+  hr = pVolume->GetMute(&mute);
+  EXIT_ON_ERROR(hr);
 
-    BOOL mute;
-    hr = pVolume->GetMute(&mute);
-    EXIT_ON_ERROR(hr);
+  enabled = (mute == TRUE) ? true : false;
 
-    enabled = (mute == TRUE) ? true : false;
-
-    SAFE_RELEASE(pVolume);
-    return 0;
+  SAFE_RELEASE(pVolume);
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  StereoRecordingIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StereoRecordingIsAvailable(bool& available)
-{
-
-    available = true;
-    return 0;
+int32_t AudioDeviceWindowsCore::StereoRecordingIsAvailable(bool& available) {
+  available = true;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SetStereoRecording
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetStereoRecording(bool enable)
-{
+int32_t AudioDeviceWindowsCore::SetStereoRecording(bool enable) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (enable) {
+    _recChannelsPrioList[0] = 2;  // try stereo first
+    _recChannelsPrioList[1] = 1;
+    _recChannels = 2;
+  } else {
+    _recChannelsPrioList[0] = 1;  // try mono first
+    _recChannelsPrioList[1] = 2;
+    _recChannels = 1;
+  }
 
-    if (enable)
-    {
-        _recChannelsPrioList[0] = 2;    // try stereo first
-        _recChannelsPrioList[1] = 1;
-        _recChannels = 2;
-    }
-    else
-    {
-        _recChannelsPrioList[0] = 1;    // try mono first
-        _recChannelsPrioList[1] = 2;
-        _recChannels = 1;
-    }
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  StereoRecording
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StereoRecording(bool& enabled) const
-{
+int32_t AudioDeviceWindowsCore::StereoRecording(bool& enabled) const {
+  if (_recChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
 
-    if (_recChannels == 2)
-        enabled = true;
-    else
-        enabled = false;
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  StereoPlayoutIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StereoPlayoutIsAvailable(bool& available)
-{
-
-    available = true;
-    return 0;
+int32_t AudioDeviceWindowsCore::StereoPlayoutIsAvailable(bool& available) {
+  available = true;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SetStereoPlayout
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetStereoPlayout(bool enable)
-{
+int32_t AudioDeviceWindowsCore::SetStereoPlayout(bool enable) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (enable) {
+    _playChannelsPrioList[0] = 2;  // try stereo first
+    _playChannelsPrioList[1] = 1;
+    _playChannels = 2;
+  } else {
+    _playChannelsPrioList[0] = 1;  // try mono first
+    _playChannelsPrioList[1] = 2;
+    _playChannels = 1;
+  }
 
-    if (enable)
-    {
-        _playChannelsPrioList[0] = 2;    // try stereo first
-        _playChannelsPrioList[1] = 1;
-        _playChannels = 2;
-    }
-    else
-    {
-        _playChannelsPrioList[0] = 1;    // try mono first
-        _playChannelsPrioList[1] = 2;
-        _playChannels = 1;
-    }
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  StereoPlayout
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StereoPlayout(bool& enabled) const
-{
+int32_t AudioDeviceWindowsCore::StereoPlayout(bool& enabled) const {
+  if (_playChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
 
-    if (_playChannels == 2)
-        enabled = true;
-    else
-        enabled = false;
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SetAGC
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetAGC(bool enable)
-{
-    rtc::CritScope lock(&_critSect);
-    _AGC = enable;
-    return 0;
+int32_t AudioDeviceWindowsCore::SetAGC(bool enable) {
+  rtc::CritScope lock(&_critSect);
+  _AGC = enable;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  AGC
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::AGC() const
-{
-    rtc::CritScope lock(&_critSect);
-    return _AGC;
+bool AudioDeviceWindowsCore::AGC() const {
+  rtc::CritScope lock(&_critSect);
+  return _AGC;
 }
 
 // ----------------------------------------------------------------------------
 //  MicrophoneVolumeIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MicrophoneVolumeIsAvailable(bool& available)
-{
+int32_t AudioDeviceWindowsCore::MicrophoneVolumeIsAvailable(bool& available) {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
 
-    if (_ptrDeviceIn == NULL)
-    {
-        return -1;
-    }
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
 
-    HRESULT hr = S_OK;
-    IAudioEndpointVolume* pVolume = NULL;
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                              reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
 
-    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, reinterpret_cast<void**>(&pVolume));
-    EXIT_ON_ERROR(hr);
+  float volume(0.0f);
+  hr = pVolume->GetMasterVolumeLevelScalar(&volume);
+  if (FAILED(hr)) {
+    available = false;
+  }
+  available = true;
 
-    float volume(0.0f);
-    hr = pVolume->GetMasterVolumeLevelScalar(&volume);
-    if (FAILED(hr))
-    {
-        available = false;
-    }
-    available = true;
-
-    SAFE_RELEASE(pVolume);
-    return 0;
+  SAFE_RELEASE(pVolume);
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    SAFE_RELEASE(pVolume);
-    return -1;
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SetMicrophoneVolume
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetMicrophoneVolume(uint32_t volume)
-{
-    LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::SetMicrophoneVolume(volume="
-                    << volume << ")";
+int32_t AudioDeviceWindowsCore::SetMicrophoneVolume(uint32_t volume) {
+  LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::SetMicrophoneVolume(volume="
+                  << volume << ")";
 
-    {
-        rtc::CritScope lock(&_critSect);
+  {
+    rtc::CritScope lock(&_critSect);
 
-        if (!_microphoneIsInitialized)
-        {
-            return -1;
-        }
-
-        if (_ptrDeviceIn == NULL)
-        {
-            return -1;
-        }
+    if (!_microphoneIsInitialized) {
+      return -1;
     }
 
-    if (volume < static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME) ||
-        volume > static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME))
-    {
-        return -1;
+    if (_ptrDeviceIn == NULL) {
+      return -1;
     }
+  }
 
-    HRESULT hr = S_OK;
-    // scale input volume to valid range (0.0 to 1.0)
-    const float fLevel = static_cast<float>(volume)/MAX_CORE_MICROPHONE_VOLUME;
-    _volumeMutex.Enter();
-    _ptrCaptureVolume->SetMasterVolumeLevelScalar(fLevel, NULL);
-    _volumeMutex.Leave();
-    EXIT_ON_ERROR(hr);
+  if (volume < static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME) ||
+      volume > static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME)) {
+    return -1;
+  }
 
-    return 0;
+  HRESULT hr = S_OK;
+  // scale input volume to valid range (0.0 to 1.0)
+  const float fLevel = static_cast<float>(volume) / MAX_CORE_MICROPHONE_VOLUME;
+  _volumeMutex.Enter();
+  _ptrCaptureVolume->SetMasterVolumeLevelScalar(fLevel, NULL);
+  _volumeMutex.Leave();
+  EXIT_ON_ERROR(hr);
+
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    return -1;
+  _TraceCOMError(hr);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  MicrophoneVolume
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MicrophoneVolume(uint32_t& volume) const
-{
-    {
-        rtc::CritScope lock(&_critSect);
+int32_t AudioDeviceWindowsCore::MicrophoneVolume(uint32_t& volume) const {
+  {
+    rtc::CritScope lock(&_critSect);
 
-        if (!_microphoneIsInitialized)
-        {
-            return -1;
-        }
-
-        if (_ptrDeviceIn == NULL)
-        {
-            return -1;
-        }
+    if (!_microphoneIsInitialized) {
+      return -1;
     }
 
-    HRESULT hr = S_OK;
-    float fLevel(0.0f);
-    volume = 0;
-    _volumeMutex.Enter();
-    hr = _ptrCaptureVolume->GetMasterVolumeLevelScalar(&fLevel);
-    _volumeMutex.Leave();
-    EXIT_ON_ERROR(hr);
+    if (_ptrDeviceIn == NULL) {
+      return -1;
+    }
+  }
 
-    // scale input volume range [0.0,1.0] to valid output range
-    volume = static_cast<uint32_t> (fLevel*MAX_CORE_MICROPHONE_VOLUME);
+  HRESULT hr = S_OK;
+  float fLevel(0.0f);
+  volume = 0;
+  _volumeMutex.Enter();
+  hr = _ptrCaptureVolume->GetMasterVolumeLevelScalar(&fLevel);
+  _volumeMutex.Leave();
+  EXIT_ON_ERROR(hr);
 
-    return 0;
+  // scale input volume range [0.0,1.0] to valid output range
+  volume = static_cast<uint32_t>(fLevel * MAX_CORE_MICROPHONE_VOLUME);
+
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    return -1;
+  _TraceCOMError(hr);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
@@ -1548,166 +1404,142 @@
 //  how it is used today in VoE.
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MaxMicrophoneVolume(uint32_t& maxVolume) const
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    if (!_microphoneIsInitialized)
-    {
-        return -1;
-    }
+  if (!_microphoneIsInitialized) {
+    return -1;
+  }
 
-    maxVolume = static_cast<uint32_t> (MAX_CORE_MICROPHONE_VOLUME);
+  maxVolume = static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME);
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  MinMicrophoneVolume
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::MinMicrophoneVolume(uint32_t& minVolume) const
-{
+int32_t AudioDeviceWindowsCore::MinMicrophoneVolume(uint32_t& minVolume) const {
+  if (!_microphoneIsInitialized) {
+    return -1;
+  }
 
-    if (!_microphoneIsInitialized)
-    {
-        return -1;
-    }
+  minVolume = static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME);
 
-    minVolume = static_cast<uint32_t> (MIN_CORE_MICROPHONE_VOLUME);
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  PlayoutDevices
 // ----------------------------------------------------------------------------
 
-int16_t AudioDeviceWindowsCore::PlayoutDevices()
-{
+int16_t AudioDeviceWindowsCore::PlayoutDevices() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_RefreshDeviceList(eRender) != -1) {
+    return (_DeviceListCount(eRender));
+  }
 
-    if (_RefreshDeviceList(eRender) != -1)
-    {
-        return (_DeviceListCount(eRender));
-    }
-
-    return -1;
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SetPlayoutDevice I (II)
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetPlayoutDevice(uint16_t index)
-{
+int32_t AudioDeviceWindowsCore::SetPlayoutDevice(uint16_t index) {
+  if (_playIsInitialized) {
+    return -1;
+  }
 
-    if (_playIsInitialized)
-    {
-        return -1;
-    }
+  // Get current number of available rendering endpoint devices and refresh the
+  // rendering collection.
+  UINT nDevices = PlayoutDevices();
 
-    // Get current number of available rendering endpoint devices and refresh the rendering collection.
-    UINT nDevices = PlayoutDevices();
+  if (index < 0 || index > (nDevices - 1)) {
+    LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                  << "]";
+    return -1;
+  }
 
-    if (index < 0 || index > (nDevices-1))
-    {
-        LOG(LS_ERROR) << "device index is out of range [0," << (nDevices-1)
-                      << "]";
-        return -1;
-    }
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  HRESULT hr(S_OK);
 
-    HRESULT hr(S_OK);
+  assert(_ptrRenderCollection != NULL);
 
-    assert(_ptrRenderCollection != NULL);
-
-    //  Select an endpoint rendering device given the specified index
+  //  Select an endpoint rendering device given the specified index
+  SAFE_RELEASE(_ptrDeviceOut);
+  hr = _ptrRenderCollection->Item(index, &_ptrDeviceOut);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(_ptrDeviceOut);
-    hr = _ptrRenderCollection->Item(
-                                 index,
-                                 &_ptrDeviceOut);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(_ptrDeviceOut);
-        return -1;
-    }
+    return -1;
+  }
 
-    WCHAR szDeviceName[MAX_PATH];
-    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
 
-    // Get the endpoint device's friendly-name
-    if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0)
-    {
-        LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
-    }
+  // Get the endpoint device's friendly-name
+  if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0) {
+    LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+  }
 
-    _usingOutputDeviceIndex = true;
-    _outputDeviceIndex = index;
+  _usingOutputDeviceIndex = true;
+  _outputDeviceIndex = index;
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SetPlayoutDevice II (II)
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device)
-{
-    if (_playIsInitialized)
-    {
-        return -1;
-    }
+int32_t AudioDeviceWindowsCore::SetPlayoutDevice(
+    AudioDeviceModule::WindowsDeviceType device) {
+  if (_playIsInitialized) {
+    return -1;
+  }
 
-    ERole role(eCommunications);
+  ERole role(eCommunications);
 
-    if (device == AudioDeviceModule::kDefaultDevice)
-    {
-        role = eConsole;
-    }
-    else if (device == AudioDeviceModule::kDefaultCommunicationDevice)
-    {
-        role = eCommunications;
-    }
+  if (device == AudioDeviceModule::kDefaultDevice) {
+    role = eConsole;
+  } else if (device == AudioDeviceModule::kDefaultCommunicationDevice) {
+    role = eCommunications;
+  }
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    // Refresh the list of rendering endpoint devices
-    _RefreshDeviceList(eRender);
+  // Refresh the list of rendering endpoint devices
+  _RefreshDeviceList(eRender);
 
-    HRESULT hr(S_OK);
+  HRESULT hr(S_OK);
 
-    assert(_ptrEnumerator != NULL);
+  assert(_ptrEnumerator != NULL);
 
-    //  Select an endpoint rendering device given the specified role
+  //  Select an endpoint rendering device given the specified role
+  SAFE_RELEASE(_ptrDeviceOut);
+  hr = _ptrEnumerator->GetDefaultAudioEndpoint(eRender, role, &_ptrDeviceOut);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(_ptrDeviceOut);
-    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
-                           eRender,
-                           role,
-                           &_ptrDeviceOut);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(_ptrDeviceOut);
-        return -1;
-    }
+    return -1;
+  }
 
-    WCHAR szDeviceName[MAX_PATH];
-    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
 
-    // Get the endpoint device's friendly-name
-    if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0)
-    {
-        LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
-    }
+  // Get the endpoint device's friendly-name
+  if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0) {
+    LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+  }
 
-    _usingOutputDeviceIndex = false;
-    _outputDevice = device;
+  _usingOutputDeviceIndex = false;
+  _outputDevice = device;
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -1717,81 +1549,70 @@
 int32_t AudioDeviceWindowsCore::PlayoutDeviceName(
     uint16_t index,
     char name[kAdmMaxDeviceNameSize],
-    char guid[kAdmMaxGuidSize])
-{
+    char guid[kAdmMaxGuidSize]) {
+  bool defaultCommunicationDevice(false);
+  const int16_t nDevices(PlayoutDevices());  // also updates the list of devices
 
-    bool defaultCommunicationDevice(false);
-    const int16_t nDevices(PlayoutDevices());  // also updates the list of devices
+  // Special fix for the case when the user selects '-1' as index (<=> Default
+  // Communication Device)
+  if (index == (uint16_t)(-1)) {
+    defaultCommunicationDevice = true;
+    index = 0;
+    LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+  }
 
-    // Special fix for the case when the user selects '-1' as index (<=> Default Communication Device)
-    if (index == (uint16_t)(-1))
-    {
-        defaultCommunicationDevice = true;
-        index = 0;
-        LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
+
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  rtc::CritScope lock(&_critSect);
+
+  int32_t ret(-1);
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+  // Get the endpoint device's friendly-name
+  if (defaultCommunicationDevice) {
+    ret = _GetDefaultDeviceName(eRender, eCommunications, szDeviceName,
+                                bufferLen);
+  } else {
+    ret = _GetListDeviceName(eRender, index, szDeviceName, bufferLen);
+  }
+
+  if (ret == 0) {
+    // Convert the endpoint device's friendly-name to UTF-8
+    if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name,
+                            kAdmMaxDeviceNameSize, NULL, NULL) == 0) {
+      LOG(LS_ERROR) << "WideCharToMultiByte(CP_UTF8) failed with error code "
+                    << GetLastError();
     }
+  }
 
-    if ((index > (nDevices-1)) || (name == NULL))
-    {
-        return -1;
+  // Get the endpoint ID string (uniquely identifies the device among all audio
+  // endpoint devices)
+  if (defaultCommunicationDevice) {
+    ret =
+        _GetDefaultDeviceID(eRender, eCommunications, szDeviceName, bufferLen);
+  } else {
+    ret = _GetListDeviceID(eRender, index, szDeviceName, bufferLen);
+  }
+
+  if (guid != NULL && ret == 0) {
+    // Convert the endpoint device's ID string to UTF-8
+    if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize,
+                            NULL, NULL) == 0) {
+      LOG(LS_ERROR) << "WideCharToMultiByte(CP_UTF8) failed with error code "
+                    << GetLastError();
     }
+  }
 
-    memset(name, 0, kAdmMaxDeviceNameSize);
-
-    if (guid != NULL)
-    {
-        memset(guid, 0, kAdmMaxGuidSize);
-    }
-
-    rtc::CritScope lock(&_critSect);
-
-    int32_t ret(-1);
-    WCHAR szDeviceName[MAX_PATH];
-    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
-
-    // Get the endpoint device's friendly-name
-    if (defaultCommunicationDevice)
-    {
-        ret = _GetDefaultDeviceName(eRender, eCommunications, szDeviceName, bufferLen);
-    }
-    else
-    {
-        ret = _GetListDeviceName(eRender, index, szDeviceName, bufferLen);
-    }
-
-    if (ret == 0)
-    {
-        // Convert the endpoint device's friendly-name to UTF-8
-        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
-        {
-            LOG(LS_ERROR)
-                << "WideCharToMultiByte(CP_UTF8) failed with error code "
-                << GetLastError();
-        }
-    }
-
-    // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
-    if (defaultCommunicationDevice)
-    {
-        ret = _GetDefaultDeviceID(eRender, eCommunications, szDeviceName, bufferLen);
-    }
-    else
-    {
-        ret = _GetListDeviceID(eRender, index, szDeviceName, bufferLen);
-    }
-
-    if (guid != NULL && ret == 0)
-    {
-        // Convert the endpoint device's ID string to UTF-8
-        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
-        {
-            LOG(LS_ERROR)
-                << "WideCharToMultiByte(CP_UTF8) failed with error code "
-                << GetLastError();
-        }
-    }
-
-    return ret;
+  return ret;
 }
 
 // ----------------------------------------------------------------------------
@@ -1801,1244 +1622,1081 @@
 int32_t AudioDeviceWindowsCore::RecordingDeviceName(
     uint16_t index,
     char name[kAdmMaxDeviceNameSize],
-    char guid[kAdmMaxGuidSize])
-{
+    char guid[kAdmMaxGuidSize]) {
+  bool defaultCommunicationDevice(false);
+  const int16_t nDevices(
+      RecordingDevices());  // also updates the list of devices
 
-    bool defaultCommunicationDevice(false);
-    const int16_t nDevices(RecordingDevices());  // also updates the list of devices
+  // Special fix for the case when the user selects '-1' as index (<=> Default
+  // Communication Device)
+  if (index == (uint16_t)(-1)) {
+    defaultCommunicationDevice = true;
+    index = 0;
+    LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+  }
 
-    // Special fix for the case when the user selects '-1' as index (<=> Default Communication Device)
-    if (index == (uint16_t)(-1))
-    {
-        defaultCommunicationDevice = true;
-        index = 0;
-        LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
+
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  rtc::CritScope lock(&_critSect);
+
+  int32_t ret(-1);
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+  // Get the endpoint device's friendly-name
+  if (defaultCommunicationDevice) {
+    ret = _GetDefaultDeviceName(eCapture, eCommunications, szDeviceName,
+                                bufferLen);
+  } else {
+    ret = _GetListDeviceName(eCapture, index, szDeviceName, bufferLen);
+  }
+
+  if (ret == 0) {
+    // Convert the endpoint device's friendly-name to UTF-8
+    if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name,
+                            kAdmMaxDeviceNameSize, NULL, NULL) == 0) {
+      LOG(LS_ERROR) << "WideCharToMultiByte(CP_UTF8) failed with error code "
+                    << GetLastError();
     }
+  }
 
-    if ((index > (nDevices-1)) || (name == NULL))
-    {
-        return -1;
+  // Get the endpoint ID string (uniquely identifies the device among all audio
+  // endpoint devices)
+  if (defaultCommunicationDevice) {
+    ret =
+        _GetDefaultDeviceID(eCapture, eCommunications, szDeviceName, bufferLen);
+  } else {
+    ret = _GetListDeviceID(eCapture, index, szDeviceName, bufferLen);
+  }
+
+  if (guid != NULL && ret == 0) {
+    // Convert the endpoint device's ID string to UTF-8
+    if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize,
+                            NULL, NULL) == 0) {
+      LOG(LS_ERROR) << "WideCharToMultiByte(CP_UTF8) failed with error code "
+                    << GetLastError();
     }
+  }
 
-    memset(name, 0, kAdmMaxDeviceNameSize);
-
-    if (guid != NULL)
-    {
-        memset(guid, 0, kAdmMaxGuidSize);
-    }
-
-    rtc::CritScope lock(&_critSect);
-
-    int32_t ret(-1);
-    WCHAR szDeviceName[MAX_PATH];
-    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
-
-    // Get the endpoint device's friendly-name
-    if (defaultCommunicationDevice)
-    {
-        ret = _GetDefaultDeviceName(eCapture, eCommunications, szDeviceName, bufferLen);
-    }
-    else
-    {
-        ret = _GetListDeviceName(eCapture, index, szDeviceName, bufferLen);
-    }
-
-    if (ret == 0)
-    {
-        // Convert the endpoint device's friendly-name to UTF-8
-        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
-        {
-            LOG(LS_ERROR)
-                << "WideCharToMultiByte(CP_UTF8) failed with error code "
-                << GetLastError();
-        }
-    }
-
-    // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
-    if (defaultCommunicationDevice)
-    {
-        ret = _GetDefaultDeviceID(eCapture, eCommunications, szDeviceName, bufferLen);
-    }
-    else
-    {
-        ret = _GetListDeviceID(eCapture, index, szDeviceName, bufferLen);
-    }
-
-    if (guid != NULL && ret == 0)
-    {
-        // Convert the endpoint device's ID string to UTF-8
-        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
-        {
-            LOG(LS_ERROR)
-                << "WideCharToMultiByte(CP_UTF8) failed with error code "
-                << GetLastError();
-        }
-    }
-
-    return ret;
+  return ret;
 }
 
 // ----------------------------------------------------------------------------
 //  RecordingDevices
 // ----------------------------------------------------------------------------
 
-int16_t AudioDeviceWindowsCore::RecordingDevices()
-{
+int16_t AudioDeviceWindowsCore::RecordingDevices() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_RefreshDeviceList(eCapture) != -1) {
+    return (_DeviceListCount(eCapture));
+  }
 
-    if (_RefreshDeviceList(eCapture) != -1)
-    {
-        return (_DeviceListCount(eCapture));
-    }
-
-    return -1;
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  SetRecordingDevice I (II)
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetRecordingDevice(uint16_t index)
-{
+int32_t AudioDeviceWindowsCore::SetRecordingDevice(uint16_t index) {
+  if (_recIsInitialized) {
+    return -1;
+  }
 
-    if (_recIsInitialized)
-    {
-        return -1;
-    }
+  // Get current number of available capture endpoint devices and refresh the
+  // capture collection.
+  UINT nDevices = RecordingDevices();
 
-    // Get current number of available capture endpoint devices and refresh the capture collection.
-    UINT nDevices = RecordingDevices();
+  if (index < 0 || index > (nDevices - 1)) {
+    LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                  << "]";
+    return -1;
+  }
 
-    if (index < 0 || index > (nDevices-1))
-    {
-        LOG(LS_ERROR) << "device index is out of range [0," << (nDevices-1)
-                      << "]";
-        return -1;
-    }
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  HRESULT hr(S_OK);
 
-    HRESULT hr(S_OK);
+  assert(_ptrCaptureCollection != NULL);
 
-    assert(_ptrCaptureCollection != NULL);
-
-    // Select an endpoint capture device given the specified index
+  // Select an endpoint capture device given the specified index
+  SAFE_RELEASE(_ptrDeviceIn);
+  hr = _ptrCaptureCollection->Item(index, &_ptrDeviceIn);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(_ptrDeviceIn);
-    hr = _ptrCaptureCollection->Item(
-                                 index,
-                                 &_ptrDeviceIn);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(_ptrDeviceIn);
-        return -1;
-    }
+    return -1;
+  }
 
-    WCHAR szDeviceName[MAX_PATH];
-    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
 
-    // Get the endpoint device's friendly-name
-    if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0)
-    {
-        LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
-    }
+  // Get the endpoint device's friendly-name
+  if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0) {
+    LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+  }
 
-    _usingInputDeviceIndex = true;
-    _inputDeviceIndex = index;
+  _usingInputDeviceIndex = true;
+  _inputDeviceIndex = index;
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  SetRecordingDevice II (II)
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device)
-{
-    if (_recIsInitialized)
-    {
-        return -1;
-    }
+int32_t AudioDeviceWindowsCore::SetRecordingDevice(
+    AudioDeviceModule::WindowsDeviceType device) {
+  if (_recIsInitialized) {
+    return -1;
+  }
 
-    ERole role(eCommunications);
+  ERole role(eCommunications);
 
-    if (device == AudioDeviceModule::kDefaultDevice)
-    {
-        role = eConsole;
-    }
-    else if (device == AudioDeviceModule::kDefaultCommunicationDevice)
-    {
-        role = eCommunications;
-    }
+  if (device == AudioDeviceModule::kDefaultDevice) {
+    role = eConsole;
+  } else if (device == AudioDeviceModule::kDefaultCommunicationDevice) {
+    role = eCommunications;
+  }
 
-    rtc::CritScope lock(&_critSect);
+  rtc::CritScope lock(&_critSect);
 
-    // Refresh the list of capture endpoint devices
-    _RefreshDeviceList(eCapture);
+  // Refresh the list of capture endpoint devices
+  _RefreshDeviceList(eCapture);
 
-    HRESULT hr(S_OK);
+  HRESULT hr(S_OK);
 
-    assert(_ptrEnumerator != NULL);
+  assert(_ptrEnumerator != NULL);
 
-    //  Select an endpoint capture device given the specified role
+  //  Select an endpoint capture device given the specified role
+  SAFE_RELEASE(_ptrDeviceIn);
+  hr = _ptrEnumerator->GetDefaultAudioEndpoint(eCapture, role, &_ptrDeviceIn);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(_ptrDeviceIn);
-    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
-                           eCapture,
-                           role,
-                           &_ptrDeviceIn);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(_ptrDeviceIn);
-        return -1;
-    }
+    return -1;
+  }
 
-    WCHAR szDeviceName[MAX_PATH];
-    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
 
-    // Get the endpoint device's friendly-name
-    if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0)
-    {
-        LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
-    }
+  // Get the endpoint device's friendly-name
+  if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0) {
+    LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+  }
 
-    _usingInputDeviceIndex = false;
-    _inputDevice = device;
+  _usingInputDeviceIndex = false;
+  _inputDevice = device;
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  PlayoutIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::PlayoutIsAvailable(bool& available)
-{
+int32_t AudioDeviceWindowsCore::PlayoutIsAvailable(bool& available) {
+  available = false;
 
-    available = false;
+  // Try to initialize the playout side
+  int32_t res = InitPlayout();
 
-    // Try to initialize the playout side
-    int32_t res = InitPlayout();
+  // Cancel effect of initialization
+  StopPlayout();
 
-    // Cancel effect of initialization
-    StopPlayout();
+  if (res != -1) {
+    available = true;
+  }
 
-    if (res != -1)
-    {
-        available = true;
-    }
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  RecordingIsAvailable
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::RecordingIsAvailable(bool& available)
-{
+int32_t AudioDeviceWindowsCore::RecordingIsAvailable(bool& available) {
+  available = false;
 
-    available = false;
+  // Try to initialize the recording side
+  int32_t res = InitRecording();
 
-    // Try to initialize the recording side
-    int32_t res = InitRecording();
+  // Cancel effect of initialization
+  StopRecording();
 
-    // Cancel effect of initialization
-    StopRecording();
+  if (res != -1) {
+    available = true;
+  }
 
-    if (res != -1)
-    {
-        available = true;
-    }
-
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  InitPlayout
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::InitPlayout()
-{
+int32_t AudioDeviceWindowsCore::InitPlayout() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_playing) {
+    return -1;
+  }
 
-    if (_playing)
-    {
-        return -1;
-    }
-
-    if (_playIsInitialized)
-    {
-        return 0;
-    }
-
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
-
-    // Initialize the speaker (devices might have been added or removed)
-    if (InitSpeaker() == -1)
-    {
-        LOG(LS_WARNING) << "InitSpeaker() failed";
-    }
-
-    // Ensure that the updated rendering endpoint device is valid
-    if (_ptrDeviceOut == NULL)
-    {
-        return -1;
-    }
-
-    if (_builtInAecEnabled && _recIsInitialized)
-    {
-        // Ensure the correct render device is configured in case
-        // InitRecording() was called before InitPlayout().
-        if (SetDMOProperties() == -1)
-        {
-            return -1;
-        }
-    }
-
-    HRESULT hr = S_OK;
-    WAVEFORMATEX* pWfxOut = NULL;
-    WAVEFORMATEX Wfx = WAVEFORMATEX();
-    WAVEFORMATEX* pWfxClosestMatch = NULL;
-
-    // Create COM object with IAudioClient interface.
-    SAFE_RELEASE(_ptrClientOut);
-    hr = _ptrDeviceOut->Activate(
-                          __uuidof(IAudioClient),
-                          CLSCTX_ALL,
-                          NULL,
-                          (void**)&_ptrClientOut);
-    EXIT_ON_ERROR(hr);
-
-    // Retrieve the stream format that the audio engine uses for its internal
-    // processing (mixing) of shared-mode streams.
-    hr = _ptrClientOut->GetMixFormat(&pWfxOut);
-    if (SUCCEEDED(hr))
-    {
-        LOG(LS_VERBOSE) << "Audio Engine's current rendering mix format:";
-        // format type
-        LOG(LS_VERBOSE) << "wFormatTag     : 0x" << std::hex
-                        << pWfxOut->wFormatTag << std::dec << " ("
-                        << pWfxOut->wFormatTag << ")";
-        // number of channels (i.e. mono, stereo...)
-        LOG(LS_VERBOSE) << "nChannels      : " << pWfxOut->nChannels;
-        // sample rate
-        LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxOut->nSamplesPerSec;
-        // for buffer estimation
-        LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxOut->nAvgBytesPerSec;
-        // block size of data
-        LOG(LS_VERBOSE) << "nBlockAlign    : " << pWfxOut->nBlockAlign;
-        // number of bits per sample of mono data
-        LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxOut->wBitsPerSample;
-        LOG(LS_VERBOSE) << "cbSize         : " << pWfxOut->cbSize;
-    }
-
-    // Set wave format
-    Wfx.wFormatTag = WAVE_FORMAT_PCM;
-    Wfx.wBitsPerSample = 16;
-    Wfx.cbSize = 0;
-
-    const int freqs[] = {48000, 44100, 16000, 96000, 32000, 8000};
-    hr = S_FALSE;
-
-    // Iterate over frequencies and channels, in order of priority
-    for (unsigned int freq = 0; freq < sizeof(freqs)/sizeof(freqs[0]); freq++)
-    {
-        for (unsigned int chan = 0; chan < sizeof(_playChannelsPrioList)/sizeof(_playChannelsPrioList[0]); chan++)
-        {
-            Wfx.nChannels = _playChannelsPrioList[chan];
-            Wfx.nSamplesPerSec = freqs[freq];
-            Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
-            Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
-            // If the method succeeds and the audio endpoint device supports the specified stream format,
-            // it returns S_OK. If the method succeeds and provides a closest match to the specified format,
-            // it returns S_FALSE.
-            hr = _ptrClientOut->IsFormatSupported(
-                                  AUDCLNT_SHAREMODE_SHARED,
-                                  &Wfx,
-                                  &pWfxClosestMatch);
-            if (hr == S_OK)
-            {
-                break;
-            }
-            else
-            {
-                if (pWfxClosestMatch)
-                {
-                    LOG(INFO) << "nChannels=" << Wfx.nChannels <<
-                        ", nSamplesPerSec=" << Wfx.nSamplesPerSec <<
-                        " is not supported. Closest match: " <<
-                        "nChannels=" << pWfxClosestMatch->nChannels <<
-                        ", nSamplesPerSec=" << pWfxClosestMatch->nSamplesPerSec;
-                    CoTaskMemFree(pWfxClosestMatch);
-                    pWfxClosestMatch = NULL;
-                }
-                else
-                {
-                    LOG(INFO) << "nChannels=" << Wfx.nChannels <<
-                        ", nSamplesPerSec=" << Wfx.nSamplesPerSec <<
-                        " is not supported. No closest match.";
-                }
-            }
-        }
-        if (hr == S_OK)
-            break;
-    }
-
-    // TODO(andrew): what happens in the event of failure in the above loop?
-    //   Is _ptrClientOut->Initialize expected to fail?
-    //   Same in InitRecording().
-    if (hr == S_OK)
-    {
-        _playAudioFrameSize = Wfx.nBlockAlign;
-        // Block size in frames is the number of samples each channel in 10ms.
-        _playBlockSizeInFrames = Wfx.nSamplesPerSec / 100;
-        // Block size in samples is block size in frames times number of
-        // channels.
-        _playBlockSizeInSamples = _playBlockSizeInFrames * Wfx.nChannels;
-        _playSampleRate = Wfx.nSamplesPerSec;
-        _devicePlaySampleRate = Wfx.nSamplesPerSec; // The device itself continues to run at 44.1 kHz.
-        _devicePlayBlockSize = Wfx.nSamplesPerSec/100;
-        _playChannels = Wfx.nChannels;
-
-        LOG(LS_VERBOSE) << "VoE selected this rendering format:";
-        LOG(LS_VERBOSE) << "wFormatTag         : 0x" << std::hex
-                        << Wfx.wFormatTag << std::dec << " (" << Wfx.wFormatTag
-                        << ")";
-        LOG(LS_VERBOSE) << "nChannels          : " << Wfx.nChannels;
-        LOG(LS_VERBOSE) << "nSamplesPerSec     : " << Wfx.nSamplesPerSec;
-        LOG(LS_VERBOSE) << "nAvgBytesPerSec    : " << Wfx.nAvgBytesPerSec;
-        LOG(LS_VERBOSE) << "nBlockAlign        : " << Wfx.nBlockAlign;
-        LOG(LS_VERBOSE) << "wBitsPerSample     : " << Wfx.wBitsPerSample;
-        LOG(LS_VERBOSE) << "cbSize             : " << Wfx.cbSize;
-        LOG(LS_VERBOSE) << "Additional settings:";
-        LOG(LS_VERBOSE) << "_playAudioFrameSize: " << _playAudioFrameSize;
-        LOG(LS_VERBOSE) << "_playBlockSizeInFrames     : "
-                        << _playBlockSizeInFrames;
-        LOG(LS_VERBOSE) << "_playChannels      : " << _playChannels;
-    }
-
-    // Create a rendering stream.
-    //
-    // ****************************************************************************
-    // For a shared-mode stream that uses event-driven buffering, the caller must
-    // set both hnsPeriodicity and hnsBufferDuration to 0. The Initialize method
-    // determines how large a buffer to allocate based on the scheduling period
-    // of the audio engine. Although the client's buffer processing thread is
-    // event driven, the basic buffer management process, as described previously,
-    // is unaltered.
-    // Each time the thread awakens, it should call IAudioClient::GetCurrentPadding
-    // to determine how much data to write to a rendering buffer or read from a capture
-    // buffer. In contrast to the two buffers that the Initialize method allocates
-    // for an exclusive-mode stream that uses event-driven buffering, a shared-mode
-    // stream requires a single buffer.
-    // ****************************************************************************
-    //
-    REFERENCE_TIME hnsBufferDuration = 0;  // ask for minimum buffer size (default)
-    if (_devicePlaySampleRate == 44100)
-    {
-        // Ask for a larger buffer size (30ms) when using 44.1kHz as render rate.
-        // There seems to be a larger risk of underruns for 44.1 compared
-        // with the default rate (48kHz). When using default, we set the requested
-        // buffer duration to 0, which sets the buffer to the minimum size
-        // required by the engine thread. The actual buffer size can then be
-        // read by GetBufferSize() and it is 20ms on most machines.
-        hnsBufferDuration = 30*10000;
-    }
-    hr = _ptrClientOut->Initialize(
-                          AUDCLNT_SHAREMODE_SHARED,             // share Audio Engine with other applications
-                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK,    // processing of the audio buffer by the client will be event driven
-                          hnsBufferDuration,                    // requested buffer capacity as a time value (in 100-nanosecond units)
-                          0,                                    // periodicity
-                          &Wfx,                                 // selected wave format
-                          NULL);                                // session GUID
-
-    if (FAILED(hr))
-    {
-        LOG(LS_ERROR) << "IAudioClient::Initialize() failed:";
-    }
-    EXIT_ON_ERROR(hr);
-
-    if (_ptrAudioBuffer)
-    {
-        // Update the audio buffer with the selected parameters
-        _ptrAudioBuffer->SetPlayoutSampleRate(_playSampleRate);
-        _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
-    }
-    else
-    {
-        // We can enter this state during CoreAudioIsSupported() when no AudioDeviceImplementation
-        // has been created, hence the AudioDeviceBuffer does not exist.
-        // It is OK to end up here since we don't initiate any media in CoreAudioIsSupported().
-        LOG(LS_VERBOSE)
-            << "AudioDeviceBuffer must be attached before streaming can start";
-    }
-
-    // Get the actual size of the shared (endpoint buffer).
-    // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
-    UINT bufferFrameCount(0);
-    hr = _ptrClientOut->GetBufferSize(
-                          &bufferFrameCount);
-    if (SUCCEEDED(hr))
-    {
-        LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => "
-                        << bufferFrameCount << " (<=> "
-                        << bufferFrameCount*_playAudioFrameSize << " bytes)";
-    }
-
-    // Set the event handle that the system signals when an audio buffer is ready
-    // to be processed by the client.
-    hr = _ptrClientOut->SetEventHandle(
-                          _hRenderSamplesReadyEvent);
-    EXIT_ON_ERROR(hr);
-
-    // Get an IAudioRenderClient interface.
-    SAFE_RELEASE(_ptrRenderClient);
-    hr = _ptrClientOut->GetService(
-                          __uuidof(IAudioRenderClient),
-                          (void**)&_ptrRenderClient);
-    EXIT_ON_ERROR(hr);
-
-    // Mark playout side as initialized
-    _playIsInitialized = true;
-
-    CoTaskMemFree(pWfxOut);
-    CoTaskMemFree(pWfxClosestMatch);
-
-    LOG(LS_VERBOSE) << "render side is now initialized";
+  if (_playIsInitialized) {
     return 0;
+  }
+
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  // Initialize the speaker (devices might have been added or removed)
+  if (InitSpeaker() == -1) {
+    LOG(LS_WARNING) << "InitSpeaker() failed";
+  }
+
+  // Ensure that the updated rendering endpoint device is valid
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  if (_builtInAecEnabled && _recIsInitialized) {
+    // Ensure the correct render device is configured in case
+    // InitRecording() was called before InitPlayout().
+    if (SetDMOProperties() == -1) {
+      return -1;
+    }
+  }
+
+  HRESULT hr = S_OK;
+  WAVEFORMATEX* pWfxOut = NULL;
+  WAVEFORMATEX Wfx = WAVEFORMATEX();
+  WAVEFORMATEX* pWfxClosestMatch = NULL;
+
+  // Create COM object with IAudioClient interface.
+  SAFE_RELEASE(_ptrClientOut);
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL,
+                               (void**)&_ptrClientOut);
+  EXIT_ON_ERROR(hr);
+
+  // Retrieve the stream format that the audio engine uses for its internal
+  // processing (mixing) of shared-mode streams.
+  hr = _ptrClientOut->GetMixFormat(&pWfxOut);
+  if (SUCCEEDED(hr)) {
+    LOG(LS_VERBOSE) << "Audio Engine's current rendering mix format:";
+    // format type
+    LOG(LS_VERBOSE) << "wFormatTag     : 0x" << std::hex << pWfxOut->wFormatTag
+                    << std::dec << " (" << pWfxOut->wFormatTag << ")";
+    // number of channels (i.e. mono, stereo...)
+    LOG(LS_VERBOSE) << "nChannels      : " << pWfxOut->nChannels;
+    // sample rate
+    LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxOut->nSamplesPerSec;
+    // for buffer estimation
+    LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxOut->nAvgBytesPerSec;
+    // block size of data
+    LOG(LS_VERBOSE) << "nBlockAlign    : " << pWfxOut->nBlockAlign;
+    // number of bits per sample of mono data
+    LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxOut->wBitsPerSample;
+    LOG(LS_VERBOSE) << "cbSize         : " << pWfxOut->cbSize;
+  }
+
+  // Set wave format
+  Wfx.wFormatTag = WAVE_FORMAT_PCM;
+  Wfx.wBitsPerSample = 16;
+  Wfx.cbSize = 0;
+
+  const int freqs[] = {48000, 44100, 16000, 96000, 32000, 8000};
+  hr = S_FALSE;
+
+  // Iterate over frequencies and channels, in order of priority
+  for (unsigned int freq = 0; freq < sizeof(freqs) / sizeof(freqs[0]); freq++) {
+    for (unsigned int chan = 0; chan < sizeof(_playChannelsPrioList) /
+                                           sizeof(_playChannelsPrioList[0]);
+         chan++) {
+      Wfx.nChannels = _playChannelsPrioList[chan];
+      Wfx.nSamplesPerSec = freqs[freq];
+      Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
+      Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
+      // If the method succeeds and the audio endpoint device supports the
+      // specified stream format, it returns S_OK. If the method succeeds and
+      // provides a closest match to the specified format, it returns S_FALSE.
+      hr = _ptrClientOut->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &Wfx,
+                                            &pWfxClosestMatch);
+      if (hr == S_OK) {
+        break;
+      } else {
+        if (pWfxClosestMatch) {
+          LOG(INFO) << "nChannels=" << Wfx.nChannels
+                    << ", nSamplesPerSec=" << Wfx.nSamplesPerSec
+                    << " is not supported. Closest match: "
+                    << "nChannels=" << pWfxClosestMatch->nChannels
+                    << ", nSamplesPerSec=" << pWfxClosestMatch->nSamplesPerSec;
+          CoTaskMemFree(pWfxClosestMatch);
+          pWfxClosestMatch = NULL;
+        } else {
+          LOG(INFO) << "nChannels=" << Wfx.nChannels
+                    << ", nSamplesPerSec=" << Wfx.nSamplesPerSec
+                    << " is not supported. No closest match.";
+        }
+      }
+    }
+    if (hr == S_OK)
+      break;
+  }
+
+  // TODO(andrew): what happens in the event of failure in the above loop?
+  //   Is _ptrClientOut->Initialize expected to fail?
+  //   Same in InitRecording().
+  if (hr == S_OK) {
+    _playAudioFrameSize = Wfx.nBlockAlign;
+    // Block size in frames is the number of samples each channel in 10ms.
+    _playBlockSizeInFrames = Wfx.nSamplesPerSec / 100;
+    // Block size in samples is block size in frames times number of
+    // channels.
+    _playBlockSizeInSamples = _playBlockSizeInFrames * Wfx.nChannels;
+    _playSampleRate = Wfx.nSamplesPerSec;
+    _devicePlaySampleRate =
+        Wfx.nSamplesPerSec;  // The device itself continues to run at 44.1 kHz.
+    _devicePlayBlockSize = Wfx.nSamplesPerSec / 100;
+    _playChannels = Wfx.nChannels;
+
+    LOG(LS_VERBOSE) << "VoE selected this rendering format:";
+    LOG(LS_VERBOSE) << "wFormatTag         : 0x" << std::hex << Wfx.wFormatTag
+                    << std::dec << " (" << Wfx.wFormatTag << ")";
+    LOG(LS_VERBOSE) << "nChannels          : " << Wfx.nChannels;
+    LOG(LS_VERBOSE) << "nSamplesPerSec     : " << Wfx.nSamplesPerSec;
+    LOG(LS_VERBOSE) << "nAvgBytesPerSec    : " << Wfx.nAvgBytesPerSec;
+    LOG(LS_VERBOSE) << "nBlockAlign        : " << Wfx.nBlockAlign;
+    LOG(LS_VERBOSE) << "wBitsPerSample     : " << Wfx.wBitsPerSample;
+    LOG(LS_VERBOSE) << "cbSize             : " << Wfx.cbSize;
+    LOG(LS_VERBOSE) << "Additional settings:";
+    LOG(LS_VERBOSE) << "_playAudioFrameSize: " << _playAudioFrameSize;
+    LOG(LS_VERBOSE) << "_playBlockSizeInFrames     : "
+                    << _playBlockSizeInFrames;
+    LOG(LS_VERBOSE) << "_playChannels      : " << _playChannels;
+  }
+
+  // Create a rendering stream.
+  //
+  // ****************************************************************************
+  // For a shared-mode stream that uses event-driven buffering, the caller must
+  // set both hnsPeriodicity and hnsBufferDuration to 0. The Initialize method
+  // determines how large a buffer to allocate based on the scheduling period
+  // of the audio engine. Although the client's buffer processing thread is
+  // event driven, the basic buffer management process, as described previously,
+  // is unaltered.
+  // Each time the thread awakens, it should call
+  // IAudioClient::GetCurrentPadding to determine how much data to write to a
+  // rendering buffer or read from a capture buffer. In contrast to the two
+  // buffers that the Initialize method allocates for an exclusive-mode stream
+  // that uses event-driven buffering, a shared-mode stream requires a single
+  // buffer.
+  // ****************************************************************************
+  //
+  REFERENCE_TIME hnsBufferDuration =
+      0;  // ask for minimum buffer size (default)
+  if (_devicePlaySampleRate == 44100) {
+    // Ask for a larger buffer size (30ms) when using 44.1kHz as render rate.
+    // There seems to be a larger risk of underruns for 44.1 compared
+    // with the default rate (48kHz). When using default, we set the requested
+    // buffer duration to 0, which sets the buffer to the minimum size
+    // required by the engine thread. The actual buffer size can then be
+    // read by GetBufferSize() and it is 20ms on most machines.
+    hnsBufferDuration = 30 * 10000;
+  }
+  hr = _ptrClientOut->Initialize(
+      AUDCLNT_SHAREMODE_SHARED,  // share Audio Engine with other applications
+      AUDCLNT_STREAMFLAGS_EVENTCALLBACK,  // processing of the audio buffer by
+                                          // the client will be event driven
+      hnsBufferDuration,  // requested buffer capacity as a time value (in
+                          // 100-nanosecond units)
+      0,                  // periodicity
+      &Wfx,               // selected wave format
+      NULL);              // session GUID
+
+  if (FAILED(hr)) {
+    LOG(LS_ERROR) << "IAudioClient::Initialize() failed:";
+  }
+  EXIT_ON_ERROR(hr);
+
+  if (_ptrAudioBuffer) {
+    // Update the audio buffer with the selected parameters
+    _ptrAudioBuffer->SetPlayoutSampleRate(_playSampleRate);
+    _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
+  } else {
+    // We can enter this state during CoreAudioIsSupported() when no
+    // AudioDeviceImplementation has been created, hence the AudioDeviceBuffer
+    // does not exist. It is OK to end up here since we don't initiate any media
+    // in CoreAudioIsSupported().
+    LOG(LS_VERBOSE)
+        << "AudioDeviceBuffer must be attached before streaming can start";
+  }
+
+  // Get the actual size of the shared (endpoint buffer).
+  // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
+  UINT bufferFrameCount(0);
+  hr = _ptrClientOut->GetBufferSize(&bufferFrameCount);
+  if (SUCCEEDED(hr)) {
+    LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => " << bufferFrameCount
+                    << " (<=> " << bufferFrameCount * _playAudioFrameSize
+                    << " bytes)";
+  }
+
+  // Set the event handle that the system signals when an audio buffer is ready
+  // to be processed by the client.
+  hr = _ptrClientOut->SetEventHandle(_hRenderSamplesReadyEvent);
+  EXIT_ON_ERROR(hr);
+
+  // Get an IAudioRenderClient interface.
+  SAFE_RELEASE(_ptrRenderClient);
+  hr = _ptrClientOut->GetService(__uuidof(IAudioRenderClient),
+                                 (void**)&_ptrRenderClient);
+  EXIT_ON_ERROR(hr);
+
+  // Mark playout side as initialized
+  _playIsInitialized = true;
+
+  CoTaskMemFree(pWfxOut);
+  CoTaskMemFree(pWfxClosestMatch);
+
+  LOG(LS_VERBOSE) << "render side is now initialized";
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    CoTaskMemFree(pWfxOut);
-    CoTaskMemFree(pWfxClosestMatch);
-    SAFE_RELEASE(_ptrClientOut);
-    SAFE_RELEASE(_ptrRenderClient);
-    return -1;
+  _TraceCOMError(hr);
+  CoTaskMemFree(pWfxOut);
+  CoTaskMemFree(pWfxClosestMatch);
+  SAFE_RELEASE(_ptrClientOut);
+  SAFE_RELEASE(_ptrRenderClient);
+  return -1;
 }
 
 // Capture initialization when the built-in AEC DirectX Media Object (DMO) is
 // used. Called from InitRecording(), most of which is skipped over. The DMO
 // handles device initialization itself.
 // Reference: http://msdn.microsoft.com/en-us/library/ff819492(v=vs.85).aspx
-int32_t AudioDeviceWindowsCore::InitRecordingDMO()
-{
-    assert(_builtInAecEnabled);
-    assert(_dmo != NULL);
+int32_t AudioDeviceWindowsCore::InitRecordingDMO() {
+  assert(_builtInAecEnabled);
+  assert(_dmo != NULL);
 
-    if (SetDMOProperties() == -1)
-    {
-        return -1;
-    }
+  if (SetDMOProperties() == -1) {
+    return -1;
+  }
 
-    DMO_MEDIA_TYPE mt = {0};
-    HRESULT hr = MoInitMediaType(&mt, sizeof(WAVEFORMATEX));
-    if (FAILED(hr))
-    {
-        MoFreeMediaType(&mt);
-        _TraceCOMError(hr);
-        return -1;
-    }
-    mt.majortype = MEDIATYPE_Audio;
-    mt.subtype = MEDIASUBTYPE_PCM;
-    mt.formattype = FORMAT_WaveFormatEx;
-
-    // Supported formats
-    // nChannels: 1 (in AEC-only mode)
-    // nSamplesPerSec: 8000, 11025, 16000, 22050
-    // wBitsPerSample: 16
-    WAVEFORMATEX* ptrWav = reinterpret_cast<WAVEFORMATEX*>(mt.pbFormat);
-    ptrWav->wFormatTag = WAVE_FORMAT_PCM;
-    ptrWav->nChannels = 1;
-    // 16000 is the highest we can support with our resampler.
-    ptrWav->nSamplesPerSec = 16000;
-    ptrWav->nAvgBytesPerSec = 32000;
-    ptrWav->nBlockAlign = 2;
-    ptrWav->wBitsPerSample = 16;
-    ptrWav->cbSize = 0;
-
-    // Set the VoE format equal to the AEC output format.
-    _recAudioFrameSize = ptrWav->nBlockAlign;
-    _recSampleRate = ptrWav->nSamplesPerSec;
-    _recBlockSize = ptrWav->nSamplesPerSec / 100;
-    _recChannels = ptrWav->nChannels;
-
-    // Set the DMO output format parameters.
-    hr = _dmo->SetOutputType(kAecCaptureStreamIndex, &mt, 0);
+  DMO_MEDIA_TYPE mt = {0};
+  HRESULT hr = MoInitMediaType(&mt, sizeof(WAVEFORMATEX));
+  if (FAILED(hr)) {
     MoFreeMediaType(&mt);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        return -1;
-    }
+    _TraceCOMError(hr);
+    return -1;
+  }
+  mt.majortype = MEDIATYPE_Audio;
+  mt.subtype = MEDIASUBTYPE_PCM;
+  mt.formattype = FORMAT_WaveFormatEx;
 
-    if (_ptrAudioBuffer)
-    {
-        _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
-        _ptrAudioBuffer->SetRecordingChannels(_recChannels);
-    }
-    else
-    {
-        // Refer to InitRecording() for comments.
-        LOG(LS_VERBOSE)
-            << "AudioDeviceBuffer must be attached before streaming can start";
-    }
+  // Supported formats
+  // nChannels: 1 (in AEC-only mode)
+  // nSamplesPerSec: 8000, 11025, 16000, 22050
+  // wBitsPerSample: 16
+  WAVEFORMATEX* ptrWav = reinterpret_cast<WAVEFORMATEX*>(mt.pbFormat);
+  ptrWav->wFormatTag = WAVE_FORMAT_PCM;
+  ptrWav->nChannels = 1;
+  // 16000 is the highest we can support with our resampler.
+  ptrWav->nSamplesPerSec = 16000;
+  ptrWav->nAvgBytesPerSec = 32000;
+  ptrWav->nBlockAlign = 2;
+  ptrWav->wBitsPerSample = 16;
+  ptrWav->cbSize = 0;
 
-    _mediaBuffer = new MediaBufferImpl(_recBlockSize * _recAudioFrameSize);
+  // Set the VoE format equal to the AEC output format.
+  _recAudioFrameSize = ptrWav->nBlockAlign;
+  _recSampleRate = ptrWav->nSamplesPerSec;
+  _recBlockSize = ptrWav->nSamplesPerSec / 100;
+  _recChannels = ptrWav->nChannels;
 
-    // Optional, but if called, must be after media types are set.
-    hr = _dmo->AllocateStreamingResources();
-    if (FAILED(hr))
-    {
-         _TraceCOMError(hr);
-        return -1;
-    }
+  // Set the DMO output format parameters.
+  hr = _dmo->SetOutputType(kAecCaptureStreamIndex, &mt, 0);
+  MoFreeMediaType(&mt);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
 
-    _recIsInitialized = true;
-    LOG(LS_VERBOSE) << "Capture side is now initialized";
+  if (_ptrAudioBuffer) {
+    _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
+    _ptrAudioBuffer->SetRecordingChannels(_recChannels);
+  } else {
+    // Refer to InitRecording() for comments.
+    LOG(LS_VERBOSE)
+        << "AudioDeviceBuffer must be attached before streaming can start";
+  }
 
-    return 0;
+  _mediaBuffer = new MediaBufferImpl(_recBlockSize * _recAudioFrameSize);
+
+  // Optional, but if called, must be after media types are set.
+  hr = _dmo->AllocateStreamingResources();
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
+
+  _recIsInitialized = true;
+  LOG(LS_VERBOSE) << "Capture side is now initialized";
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  InitRecording
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::InitRecording()
-{
+int32_t AudioDeviceWindowsCore::InitRecording() {
+  rtc::CritScope lock(&_critSect);
 
-    rtc::CritScope lock(&_critSect);
+  if (_recording) {
+    return -1;
+  }
 
-    if (_recording)
-    {
-        return -1;
-    }
-
-    if (_recIsInitialized)
-    {
-        return 0;
-    }
-
-    if (QueryPerformanceFrequency(&_perfCounterFreq) == 0)
-    {
-        return -1;
-    }
-    _perfCounterFactor = 10000000.0 / (double)_perfCounterFreq.QuadPart;
-
-    if (_ptrDeviceIn == NULL)
-    {
-        return -1;
-    }
-
-    // Initialize the microphone (devices might have been added or removed)
-    if (InitMicrophone() == -1)
-    {
-        LOG(LS_WARNING) << "InitMicrophone() failed";
-    }
-
-    // Ensure that the updated capturing endpoint device is valid
-    if (_ptrDeviceIn == NULL)
-    {
-        return -1;
-    }
-
-    if (_builtInAecEnabled)
-    {
-        // The DMO will configure the capture device.
-        return InitRecordingDMO();
-    }
-
-    HRESULT hr = S_OK;
-    WAVEFORMATEX* pWfxIn = NULL;
-    WAVEFORMATEXTENSIBLE Wfx = WAVEFORMATEXTENSIBLE();
-    WAVEFORMATEX* pWfxClosestMatch = NULL;
-
-    // Create COM object with IAudioClient interface.
-    SAFE_RELEASE(_ptrClientIn);
-    hr = _ptrDeviceIn->Activate(
-                          __uuidof(IAudioClient),
-                          CLSCTX_ALL,
-                          NULL,
-                          (void**)&_ptrClientIn);
-    EXIT_ON_ERROR(hr);
-
-    // Retrieve the stream format that the audio engine uses for its internal
-    // processing (mixing) of shared-mode streams.
-    hr = _ptrClientIn->GetMixFormat(&pWfxIn);
-    if (SUCCEEDED(hr))
-    {
-        LOG(LS_VERBOSE) << "Audio Engine's current capturing mix format:";
-        // format type
-        LOG(LS_VERBOSE) << "wFormatTag     : 0x" << std::hex
-                        << pWfxIn->wFormatTag << std::dec << " ("
-                        << pWfxIn->wFormatTag << ")";
-        // number of channels (i.e. mono, stereo...)
-        LOG(LS_VERBOSE) << "nChannels      : " << pWfxIn->nChannels;
-        // sample rate
-        LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxIn->nSamplesPerSec;
-        // for buffer estimation
-        LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxIn->nAvgBytesPerSec;
-        // block size of data
-        LOG(LS_VERBOSE) << "nBlockAlign    : " << pWfxIn->nBlockAlign;
-        // number of bits per sample of mono data
-        LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxIn->wBitsPerSample;
-        LOG(LS_VERBOSE) << "cbSize         : " << pWfxIn->cbSize;
-    }
-
-    // Set wave format
-    Wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
-    Wfx.Format.wBitsPerSample = 16;
-    Wfx.Format.cbSize = 22;
-    Wfx.dwChannelMask = 0;
-    Wfx.Samples.wValidBitsPerSample = Wfx.Format.wBitsPerSample;
-    Wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
-
-    const int freqs[6] = {48000, 44100, 16000, 96000, 32000, 8000};
-    hr = S_FALSE;
-
-    // Iterate over frequencies and channels, in order of priority
-    for (unsigned int freq = 0; freq < sizeof(freqs)/sizeof(freqs[0]); freq++)
-    {
-        for (unsigned int chan = 0; chan < sizeof(_recChannelsPrioList)/sizeof(_recChannelsPrioList[0]); chan++)
-        {
-            Wfx.Format.nChannels = _recChannelsPrioList[chan];
-            Wfx.Format.nSamplesPerSec = freqs[freq];
-            Wfx.Format.nBlockAlign = Wfx.Format.nChannels *
-                                     Wfx.Format.wBitsPerSample / 8;
-            Wfx.Format.nAvgBytesPerSec = Wfx.Format.nSamplesPerSec *
-                                         Wfx.Format.nBlockAlign;
-            // If the method succeeds and the audio endpoint device supports the specified stream format,
-            // it returns S_OK. If the method succeeds and provides a closest match to the specified format,
-            // it returns S_FALSE.
-            hr = _ptrClientIn->IsFormatSupported(
-                                  AUDCLNT_SHAREMODE_SHARED,
-                                  (WAVEFORMATEX*)&Wfx,
-                                  &pWfxClosestMatch);
-            if (hr == S_OK)
-            {
-                break;
-            }
-            else
-            {
-                if (pWfxClosestMatch)
-                {
-                    LOG(INFO) << "nChannels=" << Wfx.Format.nChannels <<
-                        ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec <<
-                        " is not supported. Closest match: " <<
-                        "nChannels=" << pWfxClosestMatch->nChannels <<
-                        ", nSamplesPerSec=" << pWfxClosestMatch->nSamplesPerSec;
-                    CoTaskMemFree(pWfxClosestMatch);
-                    pWfxClosestMatch = NULL;
-                }
-                else
-                {
-                    LOG(INFO) << "nChannels=" << Wfx.Format.nChannels <<
-                        ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec <<
-                        " is not supported. No closest match.";
-                }
-            }
-        }
-        if (hr == S_OK)
-            break;
-    }
-
-    if (hr == S_OK)
-    {
-        _recAudioFrameSize = Wfx.Format.nBlockAlign;
-        _recSampleRate = Wfx.Format.nSamplesPerSec;
-        _recBlockSize = Wfx.Format.nSamplesPerSec/100;
-        _recChannels = Wfx.Format.nChannels;
-
-        LOG(LS_VERBOSE) << "VoE selected this capturing format:";
-        LOG(LS_VERBOSE) << "wFormatTag        : 0x" << std::hex
-                        << Wfx.Format.wFormatTag << std::dec
-                        << " (" << Wfx.Format.wFormatTag << ")";
-        LOG(LS_VERBOSE) << "nChannels         : " << Wfx.Format.nChannels;
-        LOG(LS_VERBOSE) << "nSamplesPerSec    : " << Wfx.Format.nSamplesPerSec;
-        LOG(LS_VERBOSE) << "nAvgBytesPerSec   : " << Wfx.Format.nAvgBytesPerSec;
-        LOG(LS_VERBOSE) << "nBlockAlign       : " << Wfx.Format.nBlockAlign;
-        LOG(LS_VERBOSE) << "wBitsPerSample    : " << Wfx.Format.wBitsPerSample;
-        LOG(LS_VERBOSE) << "cbSize            : " << Wfx.Format.cbSize;
-        LOG(LS_VERBOSE) << "Additional settings:";
-        LOG(LS_VERBOSE) << "_recAudioFrameSize: " << _recAudioFrameSize;
-        LOG(LS_VERBOSE) << "_recBlockSize     : " << _recBlockSize;
-        LOG(LS_VERBOSE) << "_recChannels      : " << _recChannels;
-    }
-
-    // Create a capturing stream.
-    hr = _ptrClientIn->Initialize(
-                          AUDCLNT_SHAREMODE_SHARED,             // share Audio Engine with other applications
-                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK |   // processing of the audio buffer by the client will be event driven
-                          AUDCLNT_STREAMFLAGS_NOPERSIST,        // volume and mute settings for an audio session will not persist across system restarts
-                          0,                                    // required for event-driven shared mode
-                          0,                                    // periodicity
-                          (WAVEFORMATEX*)&Wfx,                  // selected wave format
-                          NULL);                                // session GUID
-
-
-    if (hr != S_OK)
-    {
-        LOG(LS_ERROR) << "IAudioClient::Initialize() failed:";
-    }
-    EXIT_ON_ERROR(hr);
-
-    if (_ptrAudioBuffer)
-    {
-        // Update the audio buffer with the selected parameters
-        _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
-        _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
-    }
-    else
-    {
-        // We can enter this state during CoreAudioIsSupported() when no AudioDeviceImplementation
-        // has been created, hence the AudioDeviceBuffer does not exist.
-        // It is OK to end up here since we don't initiate any media in CoreAudioIsSupported().
-        LOG(LS_VERBOSE)
-            << "AudioDeviceBuffer must be attached before streaming can start";
-    }
-
-    // Get the actual size of the shared (endpoint buffer).
-    // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
-    UINT bufferFrameCount(0);
-    hr = _ptrClientIn->GetBufferSize(
-                          &bufferFrameCount);
-    if (SUCCEEDED(hr))
-    {
-        LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => "
-                        << bufferFrameCount << " (<=> "
-                        << bufferFrameCount*_recAudioFrameSize << " bytes)";
-    }
-
-    // Set the event handle that the system signals when an audio buffer is ready
-    // to be processed by the client.
-    hr = _ptrClientIn->SetEventHandle(
-                          _hCaptureSamplesReadyEvent);
-    EXIT_ON_ERROR(hr);
-
-    // Get an IAudioCaptureClient interface.
-    SAFE_RELEASE(_ptrCaptureClient);
-    hr = _ptrClientIn->GetService(
-                          __uuidof(IAudioCaptureClient),
-                          (void**)&_ptrCaptureClient);
-    EXIT_ON_ERROR(hr);
-
-    // Mark capture side as initialized
-    _recIsInitialized = true;
-
-    CoTaskMemFree(pWfxIn);
-    CoTaskMemFree(pWfxClosestMatch);
-
-    LOG(LS_VERBOSE) << "capture side is now initialized";
+  if (_recIsInitialized) {
     return 0;
+  }
+
+  if (QueryPerformanceFrequency(&_perfCounterFreq) == 0) {
+    return -1;
+  }
+  _perfCounterFactor = 10000000.0 / (double)_perfCounterFreq.QuadPart;
+
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
+
+  // Initialize the microphone (devices might have been added or removed)
+  if (InitMicrophone() == -1) {
+    LOG(LS_WARNING) << "InitMicrophone() failed";
+  }
+
+  // Ensure that the updated capturing endpoint device is valid
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
+
+  if (_builtInAecEnabled) {
+    // The DMO will configure the capture device.
+    return InitRecordingDMO();
+  }
+
+  HRESULT hr = S_OK;
+  WAVEFORMATEX* pWfxIn = NULL;
+  WAVEFORMATEXTENSIBLE Wfx = WAVEFORMATEXTENSIBLE();
+  WAVEFORMATEX* pWfxClosestMatch = NULL;
+
+  // Create COM object with IAudioClient interface.
+  SAFE_RELEASE(_ptrClientIn);
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL,
+                              (void**)&_ptrClientIn);
+  EXIT_ON_ERROR(hr);
+
+  // Retrieve the stream format that the audio engine uses for its internal
+  // processing (mixing) of shared-mode streams.
+  hr = _ptrClientIn->GetMixFormat(&pWfxIn);
+  if (SUCCEEDED(hr)) {
+    LOG(LS_VERBOSE) << "Audio Engine's current capturing mix format:";
+    // format type
+    LOG(LS_VERBOSE) << "wFormatTag     : 0x" << std::hex << pWfxIn->wFormatTag
+                    << std::dec << " (" << pWfxIn->wFormatTag << ")";
+    // number of channels (i.e. mono, stereo...)
+    LOG(LS_VERBOSE) << "nChannels      : " << pWfxIn->nChannels;
+    // sample rate
+    LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxIn->nSamplesPerSec;
+    // for buffer estimation
+    LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxIn->nAvgBytesPerSec;
+    // block size of data
+    LOG(LS_VERBOSE) << "nBlockAlign    : " << pWfxIn->nBlockAlign;
+    // number of bits per sample of mono data
+    LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxIn->wBitsPerSample;
+    LOG(LS_VERBOSE) << "cbSize         : " << pWfxIn->cbSize;
+  }
+
+  // Set wave format
+  Wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+  Wfx.Format.wBitsPerSample = 16;
+  Wfx.Format.cbSize = 22;
+  Wfx.dwChannelMask = 0;
+  Wfx.Samples.wValidBitsPerSample = Wfx.Format.wBitsPerSample;
+  Wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+
+  const int freqs[6] = {48000, 44100, 16000, 96000, 32000, 8000};
+  hr = S_FALSE;
+
+  // Iterate over frequencies and channels, in order of priority
+  for (unsigned int freq = 0; freq < sizeof(freqs) / sizeof(freqs[0]); freq++) {
+    for (unsigned int chan = 0;
+         chan < sizeof(_recChannelsPrioList) / sizeof(_recChannelsPrioList[0]);
+         chan++) {
+      Wfx.Format.nChannels = _recChannelsPrioList[chan];
+      Wfx.Format.nSamplesPerSec = freqs[freq];
+      Wfx.Format.nBlockAlign =
+          Wfx.Format.nChannels * Wfx.Format.wBitsPerSample / 8;
+      Wfx.Format.nAvgBytesPerSec =
+          Wfx.Format.nSamplesPerSec * Wfx.Format.nBlockAlign;
+      // If the method succeeds and the audio endpoint device supports the
+      // specified stream format, it returns S_OK. If the method succeeds and
+      // provides a closest match to the specified format, it returns S_FALSE.
+      hr = _ptrClientIn->IsFormatSupported(
+          AUDCLNT_SHAREMODE_SHARED, (WAVEFORMATEX*)&Wfx, &pWfxClosestMatch);
+      if (hr == S_OK) {
+        break;
+      } else {
+        if (pWfxClosestMatch) {
+          LOG(INFO) << "nChannels=" << Wfx.Format.nChannels
+                    << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec
+                    << " is not supported. Closest match: "
+                    << "nChannels=" << pWfxClosestMatch->nChannels
+                    << ", nSamplesPerSec=" << pWfxClosestMatch->nSamplesPerSec;
+          CoTaskMemFree(pWfxClosestMatch);
+          pWfxClosestMatch = NULL;
+        } else {
+          LOG(INFO) << "nChannels=" << Wfx.Format.nChannels
+                    << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec
+                    << " is not supported. No closest match.";
+        }
+      }
+    }
+    if (hr == S_OK)
+      break;
+  }
+
+  if (hr == S_OK) {
+    _recAudioFrameSize = Wfx.Format.nBlockAlign;
+    _recSampleRate = Wfx.Format.nSamplesPerSec;
+    _recBlockSize = Wfx.Format.nSamplesPerSec / 100;
+    _recChannels = Wfx.Format.nChannels;
+
+    LOG(LS_VERBOSE) << "VoE selected this capturing format:";
+    LOG(LS_VERBOSE) << "wFormatTag        : 0x" << std::hex
+                    << Wfx.Format.wFormatTag << std::dec << " ("
+                    << Wfx.Format.wFormatTag << ")";
+    LOG(LS_VERBOSE) << "nChannels         : " << Wfx.Format.nChannels;
+    LOG(LS_VERBOSE) << "nSamplesPerSec    : " << Wfx.Format.nSamplesPerSec;
+    LOG(LS_VERBOSE) << "nAvgBytesPerSec   : " << Wfx.Format.nAvgBytesPerSec;
+    LOG(LS_VERBOSE) << "nBlockAlign       : " << Wfx.Format.nBlockAlign;
+    LOG(LS_VERBOSE) << "wBitsPerSample    : " << Wfx.Format.wBitsPerSample;
+    LOG(LS_VERBOSE) << "cbSize            : " << Wfx.Format.cbSize;
+    LOG(LS_VERBOSE) << "Additional settings:";
+    LOG(LS_VERBOSE) << "_recAudioFrameSize: " << _recAudioFrameSize;
+    LOG(LS_VERBOSE) << "_recBlockSize     : " << _recBlockSize;
+    LOG(LS_VERBOSE) << "_recChannels      : " << _recChannels;
+  }
+
+  // Create a capturing stream.
+  hr = _ptrClientIn->Initialize(
+      AUDCLNT_SHAREMODE_SHARED,  // share Audio Engine with other applications
+      AUDCLNT_STREAMFLAGS_EVENTCALLBACK |  // processing of the audio buffer by
+                                           // the client will be event driven
+          AUDCLNT_STREAMFLAGS_NOPERSIST,   // volume and mute settings for an
+                                           // audio session will not persist
+                                           // across system restarts
+      0,                    // required for event-driven shared mode
+      0,                    // periodicity
+      (WAVEFORMATEX*)&Wfx,  // selected wave format
+      NULL);                // session GUID
+
+  if (hr != S_OK) {
+    LOG(LS_ERROR) << "IAudioClient::Initialize() failed:";
+  }
+  EXIT_ON_ERROR(hr);
+
+  if (_ptrAudioBuffer) {
+    // Update the audio buffer with the selected parameters
+    _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
+    _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
+  } else {
+    // We can enter this state during CoreAudioIsSupported() when no
+    // AudioDeviceImplementation has been created, hence the AudioDeviceBuffer
+    // does not exist. It is OK to end up here since we don't initiate any media
+    // in CoreAudioIsSupported().
+    LOG(LS_VERBOSE)
+        << "AudioDeviceBuffer must be attached before streaming can start";
+  }
+
+  // Get the actual size of the shared (endpoint buffer).
+  // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
+  UINT bufferFrameCount(0);
+  hr = _ptrClientIn->GetBufferSize(&bufferFrameCount);
+  if (SUCCEEDED(hr)) {
+    LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => " << bufferFrameCount
+                    << " (<=> " << bufferFrameCount * _recAudioFrameSize
+                    << " bytes)";
+  }
+
+  // Set the event handle that the system signals when an audio buffer is ready
+  // to be processed by the client.
+  hr = _ptrClientIn->SetEventHandle(_hCaptureSamplesReadyEvent);
+  EXIT_ON_ERROR(hr);
+
+  // Get an IAudioCaptureClient interface.
+  SAFE_RELEASE(_ptrCaptureClient);
+  hr = _ptrClientIn->GetService(__uuidof(IAudioCaptureClient),
+                                (void**)&_ptrCaptureClient);
+  EXIT_ON_ERROR(hr);
+
+  // Mark capture side as initialized
+  _recIsInitialized = true;
+
+  CoTaskMemFree(pWfxIn);
+  CoTaskMemFree(pWfxClosestMatch);
+
+  LOG(LS_VERBOSE) << "capture side is now initialized";
+  return 0;
 
 Exit:
-    _TraceCOMError(hr);
-    CoTaskMemFree(pWfxIn);
-    CoTaskMemFree(pWfxClosestMatch);
-    SAFE_RELEASE(_ptrClientIn);
-    SAFE_RELEASE(_ptrCaptureClient);
-    return -1;
+  _TraceCOMError(hr);
+  CoTaskMemFree(pWfxIn);
+  CoTaskMemFree(pWfxClosestMatch);
+  SAFE_RELEASE(_ptrClientIn);
+  SAFE_RELEASE(_ptrCaptureClient);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  StartRecording
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StartRecording()
-{
+int32_t AudioDeviceWindowsCore::StartRecording() {
+  if (!_recIsInitialized) {
+    return -1;
+  }
 
-    if (!_recIsInitialized)
-    {
-        return -1;
-    }
-
-    if (_hRecThread != NULL)
-    {
-        return 0;
-    }
-
-    if (_recording)
-    {
-        return 0;
-    }
-
-    {
-        rtc::CritScope critScoped(&_critSect);
-
-        // Create thread which will drive the capturing
-        LPTHREAD_START_ROUTINE lpStartAddress = WSAPICaptureThread;
-        if (_builtInAecEnabled)
-        {
-            // Redirect to the DMO polling method.
-            lpStartAddress = WSAPICaptureThreadPollDMO;
-
-            if (!_playing)
-            {
-                // The DMO won't provide us captured output data unless we
-                // give it render data to process.
-                LOG(LS_ERROR)
-                    << "Playout must be started before recording when using"
-                    << " the built-in AEC";
-                return -1;
-            }
-        }
-
-        assert(_hRecThread == NULL);
-        _hRecThread = CreateThread(NULL,
-                                   0,
-                                   lpStartAddress,
-                                   this,
-                                   0,
-                                   NULL);
-        if (_hRecThread == NULL)
-        {
-            LOG(LS_ERROR) << "failed to create the recording thread";
-            return -1;
-        }
-
-        // Set thread priority to highest possible
-        SetThreadPriority(_hRecThread, THREAD_PRIORITY_TIME_CRITICAL);
-
-        assert(_hGetCaptureVolumeThread == NULL);
-        _hGetCaptureVolumeThread = CreateThread(NULL,
-                                                0,
-                                                GetCaptureVolumeThread,
-                                                this,
-                                                0,
-                                                NULL);
-        if (_hGetCaptureVolumeThread == NULL)
-        {
-            LOG(LS_ERROR) << "failed to create the volume getter thread";
-            return -1;
-        }
-
-        assert(_hSetCaptureVolumeThread == NULL);
-        _hSetCaptureVolumeThread = CreateThread(NULL,
-                                                0,
-                                                SetCaptureVolumeThread,
-                                                this,
-                                                0,
-                                                NULL);
-        if (_hSetCaptureVolumeThread == NULL)
-        {
-            LOG(LS_ERROR) << "failed to create the volume setter thread";
-            return -1;
-        }
-    }  // critScoped
-
-    DWORD ret = WaitForSingleObject(_hCaptureStartedEvent, 1000);
-    if (ret != WAIT_OBJECT_0)
-    {
-        LOG(LS_VERBOSE) << "capturing did not start up properly";
-        return -1;
-    }
-    LOG(LS_VERBOSE) << "capture audio stream has now started...";
-
-    _recording = true;
-
+  if (_hRecThread != NULL) {
     return 0;
+  }
+
+  if (_recording) {
+    return 0;
+  }
+
+  {
+    rtc::CritScope critScoped(&_critSect);
+
+    // Create thread which will drive the capturing
+    LPTHREAD_START_ROUTINE lpStartAddress = WSAPICaptureThread;
+    if (_builtInAecEnabled) {
+      // Redirect to the DMO polling method.
+      lpStartAddress = WSAPICaptureThreadPollDMO;
+
+      if (!_playing) {
+        // The DMO won't provide us captured output data unless we
+        // give it render data to process.
+        LOG(LS_ERROR) << "Playout must be started before recording when using"
+                      << " the built-in AEC";
+        return -1;
+      }
+    }
+
+    assert(_hRecThread == NULL);
+    _hRecThread = CreateThread(NULL, 0, lpStartAddress, this, 0, NULL);
+    if (_hRecThread == NULL) {
+      LOG(LS_ERROR) << "failed to create the recording thread";
+      return -1;
+    }
+
+    // Set thread priority to highest possible
+    SetThreadPriority(_hRecThread, THREAD_PRIORITY_TIME_CRITICAL);
+
+    assert(_hGetCaptureVolumeThread == NULL);
+    _hGetCaptureVolumeThread =
+        CreateThread(NULL, 0, GetCaptureVolumeThread, this, 0, NULL);
+    if (_hGetCaptureVolumeThread == NULL) {
+      LOG(LS_ERROR) << "failed to create the volume getter thread";
+      return -1;
+    }
+
+    assert(_hSetCaptureVolumeThread == NULL);
+    _hSetCaptureVolumeThread =
+        CreateThread(NULL, 0, SetCaptureVolumeThread, this, 0, NULL);
+    if (_hSetCaptureVolumeThread == NULL) {
+      LOG(LS_ERROR) << "failed to create the volume setter thread";
+      return -1;
+    }
+  }  // critScoped
+
+  DWORD ret = WaitForSingleObject(_hCaptureStartedEvent, 1000);
+  if (ret != WAIT_OBJECT_0) {
+    LOG(LS_VERBOSE) << "capturing did not start up properly";
+    return -1;
+  }
+  LOG(LS_VERBOSE) << "capture audio stream has now started...";
+
+  _recording = true;
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  StopRecording
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StopRecording()
-{
-    int32_t err = 0;
+int32_t AudioDeviceWindowsCore::StopRecording() {
+  int32_t err = 0;
 
-    if (!_recIsInitialized)
-    {
-        return 0;
-    }
+  if (!_recIsInitialized) {
+    return 0;
+  }
 
-    _Lock();
+  _Lock();
 
-    if (_hRecThread == NULL)
-    {
-        LOG(LS_VERBOSE)
-            << "no capturing stream is active => close down WASAPI only";
-        SAFE_RELEASE(_ptrClientIn);
-        SAFE_RELEASE(_ptrCaptureClient);
-        _recIsInitialized = false;
-        _recording = false;
-        _UnLock();
-        return 0;
-    }
-
-    // Stop the driving thread...
-    LOG(LS_VERBOSE) << "closing down the webrtc_core_audio_capture_thread...";
-    // Manual-reset event; it will remain signalled to stop all capture threads.
-    SetEvent(_hShutdownCaptureEvent);
-
-    _UnLock();
-    DWORD ret = WaitForSingleObject(_hRecThread, 2000);
-    if (ret != WAIT_OBJECT_0)
-    {
-        LOG(LS_ERROR)
-            << "failed to close down webrtc_core_audio_capture_thread";
-        err = -1;
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "webrtc_core_audio_capture_thread is now closed";
-    }
-
-    ret = WaitForSingleObject(_hGetCaptureVolumeThread, 2000);
-    if (ret != WAIT_OBJECT_0)
-    {
-        // the thread did not stop as it should
-        LOG(LS_ERROR) << "failed to close down volume getter thread";
-        err = -1;
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "volume getter thread is now closed";
-    }
-
-    ret = WaitForSingleObject(_hSetCaptureVolumeThread, 2000);
-    if (ret != WAIT_OBJECT_0)
-    {
-        // the thread did not stop as it should
-        LOG(LS_ERROR) << "failed to close down volume setter thread";
-        err = -1;
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "volume setter thread is now closed";
-    }
-    _Lock();
-
-    ResetEvent(_hShutdownCaptureEvent); // Must be manually reset.
-    // Ensure that the thread has released these interfaces properly.
-    assert(err == -1 || _ptrClientIn == NULL);
-    assert(err == -1 || _ptrCaptureClient == NULL);
-
+  if (_hRecThread == NULL) {
+    LOG(LS_VERBOSE)
+        << "no capturing stream is active => close down WASAPI only";
+    SAFE_RELEASE(_ptrClientIn);
+    SAFE_RELEASE(_ptrCaptureClient);
     _recIsInitialized = false;
     _recording = false;
-
-    // These will create thread leaks in the result of an error,
-    // but we can at least resume the call.
-    CloseHandle(_hRecThread);
-    _hRecThread = NULL;
-
-    CloseHandle(_hGetCaptureVolumeThread);
-    _hGetCaptureVolumeThread = NULL;
-
-    CloseHandle(_hSetCaptureVolumeThread);
-    _hSetCaptureVolumeThread = NULL;
-
-    if (_builtInAecEnabled)
-    {
-        assert(_dmo != NULL);
-        // This is necessary. Otherwise the DMO can generate garbage render
-        // audio even after rendering has stopped.
-        HRESULT hr = _dmo->FreeStreamingResources();
-        if (FAILED(hr))
-        {
-            _TraceCOMError(hr);
-            err = -1;
-        }
-    }
-
-    // Reset the recording delay value.
-    _sndCardRecDelay = 0;
-
     _UnLock();
+    return 0;
+  }
 
-    return err;
+  // Stop the driving thread...
+  LOG(LS_VERBOSE) << "closing down the webrtc_core_audio_capture_thread...";
+  // Manual-reset event; it will remain signalled to stop all capture threads.
+  SetEvent(_hShutdownCaptureEvent);
+
+  _UnLock();
+  DWORD ret = WaitForSingleObject(_hRecThread, 2000);
+  if (ret != WAIT_OBJECT_0) {
+    LOG(LS_ERROR) << "failed to close down webrtc_core_audio_capture_thread";
+    err = -1;
+  } else {
+    LOG(LS_VERBOSE) << "webrtc_core_audio_capture_thread is now closed";
+  }
+
+  ret = WaitForSingleObject(_hGetCaptureVolumeThread, 2000);
+  if (ret != WAIT_OBJECT_0) {
+    // the thread did not stop as it should
+    LOG(LS_ERROR) << "failed to close down volume getter thread";
+    err = -1;
+  } else {
+    LOG(LS_VERBOSE) << "volume getter thread is now closed";
+  }
+
+  ret = WaitForSingleObject(_hSetCaptureVolumeThread, 2000);
+  if (ret != WAIT_OBJECT_0) {
+    // the thread did not stop as it should
+    LOG(LS_ERROR) << "failed to close down volume setter thread";
+    err = -1;
+  } else {
+    LOG(LS_VERBOSE) << "volume setter thread is now closed";
+  }
+  _Lock();
+
+  ResetEvent(_hShutdownCaptureEvent);  // Must be manually reset.
+  // Ensure that the thread has released these interfaces properly.
+  assert(err == -1 || _ptrClientIn == NULL);
+  assert(err == -1 || _ptrCaptureClient == NULL);
+
+  _recIsInitialized = false;
+  _recording = false;
+
+  // These will create thread leaks in the result of an error,
+  // but we can at least resume the call.
+  CloseHandle(_hRecThread);
+  _hRecThread = NULL;
+
+  CloseHandle(_hGetCaptureVolumeThread);
+  _hGetCaptureVolumeThread = NULL;
+
+  CloseHandle(_hSetCaptureVolumeThread);
+  _hSetCaptureVolumeThread = NULL;
+
+  if (_builtInAecEnabled) {
+    assert(_dmo != NULL);
+    // This is necessary. Otherwise the DMO can generate garbage render
+    // audio even after rendering has stopped.
+    HRESULT hr = _dmo->FreeStreamingResources();
+    if (FAILED(hr)) {
+      _TraceCOMError(hr);
+      err = -1;
+    }
+  }
+
+  // Reset the recording delay value.
+  _sndCardRecDelay = 0;
+
+  _UnLock();
+
+  return err;
 }
 
 // ----------------------------------------------------------------------------
 //  RecordingIsInitialized
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::RecordingIsInitialized() const
-{
-    return (_recIsInitialized);
+bool AudioDeviceWindowsCore::RecordingIsInitialized() const {
+  return (_recIsInitialized);
 }
 
 // ----------------------------------------------------------------------------
 //  Recording
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::Recording() const
-{
-    return (_recording);
+bool AudioDeviceWindowsCore::Recording() const {
+  return (_recording);
 }
 
 // ----------------------------------------------------------------------------
 //  PlayoutIsInitialized
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::PlayoutIsInitialized() const
-{
-
-    return (_playIsInitialized);
+bool AudioDeviceWindowsCore::PlayoutIsInitialized() const {
+  return (_playIsInitialized);
 }
 
 // ----------------------------------------------------------------------------
 //  StartPlayout
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StartPlayout()
-{
+int32_t AudioDeviceWindowsCore::StartPlayout() {
+  if (!_playIsInitialized) {
+    return -1;
+  }
 
-    if (!_playIsInitialized)
-    {
-        return -1;
-    }
-
-    if (_hPlayThread != NULL)
-    {
-        return 0;
-    }
-
-    if (_playing)
-    {
-        return 0;
-    }
-
-    {
-        rtc::CritScope critScoped(&_critSect);
-
-        // Create thread which will drive the rendering.
-        assert(_hPlayThread == NULL);
-        _hPlayThread = CreateThread(
-                         NULL,
-                         0,
-                         WSAPIRenderThread,
-                         this,
-                         0,
-                         NULL);
-        if (_hPlayThread == NULL)
-        {
-            LOG(LS_ERROR) << "failed to create the playout thread";
-            return -1;
-        }
-
-        // Set thread priority to highest possible.
-        SetThreadPriority(_hPlayThread, THREAD_PRIORITY_TIME_CRITICAL);
-    }  // critScoped
-
-    DWORD ret = WaitForSingleObject(_hRenderStartedEvent, 1000);
-    if (ret != WAIT_OBJECT_0)
-    {
-        LOG(LS_VERBOSE) << "rendering did not start up properly";
-        return -1;
-    }
-
-    _playing = true;
-    LOG(LS_VERBOSE) << "rendering audio stream has now started...";
-
+  if (_hPlayThread != NULL) {
     return 0;
+  }
+
+  if (_playing) {
+    return 0;
+  }
+
+  {
+    rtc::CritScope critScoped(&_critSect);
+
+    // Create thread which will drive the rendering.
+    assert(_hPlayThread == NULL);
+    _hPlayThread = CreateThread(NULL, 0, WSAPIRenderThread, this, 0, NULL);
+    if (_hPlayThread == NULL) {
+      LOG(LS_ERROR) << "failed to create the playout thread";
+      return -1;
+    }
+
+    // Set thread priority to highest possible.
+    SetThreadPriority(_hPlayThread, THREAD_PRIORITY_TIME_CRITICAL);
+  }  // critScoped
+
+  DWORD ret = WaitForSingleObject(_hRenderStartedEvent, 1000);
+  if (ret != WAIT_OBJECT_0) {
+    LOG(LS_VERBOSE) << "rendering did not start up properly";
+    return -1;
+  }
+
+  _playing = true;
+  LOG(LS_VERBOSE) << "rendering audio stream has now started...";
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  StopPlayout
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::StopPlayout()
-{
-
-    if (!_playIsInitialized)
-    {
-        return 0;
-    }
-
-    {
-        rtc::CritScope critScoped(&_critSect) ;
-
-        if (_hPlayThread == NULL)
-        {
-            LOG(LS_VERBOSE)
-                << "no rendering stream is active => close down WASAPI only";
-            SAFE_RELEASE(_ptrClientOut);
-            SAFE_RELEASE(_ptrRenderClient);
-            _playIsInitialized = false;
-            _playing = false;
-            return 0;
-        }
-
-        // stop the driving thread...
-        LOG(LS_VERBOSE)
-            << "closing down the webrtc_core_audio_render_thread...";
-        SetEvent(_hShutdownRenderEvent);
-    }  // critScoped
-
-    DWORD ret = WaitForSingleObject(_hPlayThread, 2000);
-    if (ret != WAIT_OBJECT_0)
-    {
-        // the thread did not stop as it should
-        LOG(LS_ERROR) << "failed to close down webrtc_core_audio_render_thread";
-        CloseHandle(_hPlayThread);
-        _hPlayThread = NULL;
-        _playIsInitialized = false;
-        _playing = false;
-        return -1;
-    }
-
-    {
-        rtc::CritScope critScoped(&_critSect);
-        LOG(LS_VERBOSE) << "webrtc_core_audio_render_thread is now closed";
-
-        // to reset this event manually at each time we finish with it,
-        // in case that the render thread has exited before StopPlayout(),
-        // this event might be caught by the new render thread within same VoE instance.
-        ResetEvent(_hShutdownRenderEvent);
-
-        SAFE_RELEASE(_ptrClientOut);
-        SAFE_RELEASE(_ptrRenderClient);
-
-        _playIsInitialized = false;
-        _playing = false;
-
-        CloseHandle(_hPlayThread);
-        _hPlayThread = NULL;
-
-        if (_builtInAecEnabled && _recording)
-        {
-            // The DMO won't provide us captured output data unless we
-            // give it render data to process.
-            //
-            // We still permit the playout to shutdown, and trace a warning.
-            // Otherwise, VoE can get into a state which will never permit
-            // playout to stop properly.
-            LOG(LS_WARNING)
-                << "Recording should be stopped before playout when using the"
-                << " built-in AEC";
-        }
-
-        // Reset the playout delay value.
-        _sndCardPlayDelay = 0;
-    }  // critScoped
-
+int32_t AudioDeviceWindowsCore::StopPlayout() {
+  if (!_playIsInitialized) {
     return 0;
+  }
+
+  {
+    rtc::CritScope critScoped(&_critSect);
+
+    if (_hPlayThread == NULL) {
+      LOG(LS_VERBOSE)
+          << "no rendering stream is active => close down WASAPI only";
+      SAFE_RELEASE(_ptrClientOut);
+      SAFE_RELEASE(_ptrRenderClient);
+      _playIsInitialized = false;
+      _playing = false;
+      return 0;
+    }
+
+    // stop the driving thread...
+    LOG(LS_VERBOSE) << "closing down the webrtc_core_audio_render_thread...";
+    SetEvent(_hShutdownRenderEvent);
+  }  // critScoped
+
+  DWORD ret = WaitForSingleObject(_hPlayThread, 2000);
+  if (ret != WAIT_OBJECT_0) {
+    // the thread did not stop as it should
+    LOG(LS_ERROR) << "failed to close down webrtc_core_audio_render_thread";
+    CloseHandle(_hPlayThread);
+    _hPlayThread = NULL;
+    _playIsInitialized = false;
+    _playing = false;
+    return -1;
+  }
+
+  {
+    rtc::CritScope critScoped(&_critSect);
+    LOG(LS_VERBOSE) << "webrtc_core_audio_render_thread is now closed";
+
+    // Reset this event manually each time we finish with it, in case the
+    // render thread has exited before StopPlayout(); otherwise this event
+    // might be caught by a new render thread within the same VoE
+    // instance.
+    ResetEvent(_hShutdownRenderEvent);
+
+    SAFE_RELEASE(_ptrClientOut);
+    SAFE_RELEASE(_ptrRenderClient);
+
+    _playIsInitialized = false;
+    _playing = false;
+
+    CloseHandle(_hPlayThread);
+    _hPlayThread = NULL;
+
+    if (_builtInAecEnabled && _recording) {
+      // The DMO won't provide us captured output data unless we
+      // give it render data to process.
+      //
+      // We still permit the playout to shutdown, and trace a warning.
+      // Otherwise, VoE can get into a state which will never permit
+      // playout to stop properly.
+      LOG(LS_WARNING)
+          << "Recording should be stopped before playout when using the"
+          << " built-in AEC";
+    }
+
+    // Reset the playout delay value.
+    _sndCardPlayDelay = 0;
+  }  // critScoped
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  PlayoutDelay
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::PlayoutDelay(uint16_t& delayMS) const
-{
-    rtc::CritScope critScoped(&_critSect);
-    delayMS = static_cast<uint16_t>(_sndCardPlayDelay);
-    return 0;
+int32_t AudioDeviceWindowsCore::PlayoutDelay(uint16_t& delayMS) const {
+  rtc::CritScope critScoped(&_critSect);
+  delayMS = static_cast<uint16_t>(_sndCardPlayDelay);
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  Playing
 // ----------------------------------------------------------------------------
 
-bool AudioDeviceWindowsCore::Playing() const
-{
-    return (_playing);
+bool AudioDeviceWindowsCore::Playing() const {
+  return (_playing);
 }
 
 // ============================================================================
@@ -3049,1068 +2707,953 @@
 //  [static] WSAPIRenderThread
 // ----------------------------------------------------------------------------
 
-DWORD WINAPI AudioDeviceWindowsCore::WSAPIRenderThread(LPVOID context)
-{
-    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
-        DoRenderThread();
+DWORD WINAPI AudioDeviceWindowsCore::WSAPIRenderThread(LPVOID context) {
+  return reinterpret_cast<AudioDeviceWindowsCore*>(context)->DoRenderThread();
 }
 
 // ----------------------------------------------------------------------------
 //  [static] WSAPICaptureThread
 // ----------------------------------------------------------------------------
 
-DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThread(LPVOID context)
-{
-    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
-        DoCaptureThread();
+DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThread(LPVOID context) {
+  return reinterpret_cast<AudioDeviceWindowsCore*>(context)->DoCaptureThread();
 }
 
-DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThreadPollDMO(LPVOID context)
-{
-    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
-        DoCaptureThreadPollDMO();
+DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThreadPollDMO(LPVOID context) {
+  return reinterpret_cast<AudioDeviceWindowsCore*>(context)
+      ->DoCaptureThreadPollDMO();
 }
 
-DWORD WINAPI AudioDeviceWindowsCore::GetCaptureVolumeThread(LPVOID context)
-{
-    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
-        DoGetCaptureVolumeThread();
+DWORD WINAPI AudioDeviceWindowsCore::GetCaptureVolumeThread(LPVOID context) {
+  return reinterpret_cast<AudioDeviceWindowsCore*>(context)
+      ->DoGetCaptureVolumeThread();
 }
 
-DWORD WINAPI AudioDeviceWindowsCore::SetCaptureVolumeThread(LPVOID context)
-{
-    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
-        DoSetCaptureVolumeThread();
+DWORD WINAPI AudioDeviceWindowsCore::SetCaptureVolumeThread(LPVOID context) {
+  return reinterpret_cast<AudioDeviceWindowsCore*>(context)
+      ->DoSetCaptureVolumeThread();
 }
 
-DWORD AudioDeviceWindowsCore::DoGetCaptureVolumeThread()
-{
-    HANDLE waitObject = _hShutdownCaptureEvent;
+DWORD AudioDeviceWindowsCore::DoGetCaptureVolumeThread() {
+  HANDLE waitObject = _hShutdownCaptureEvent;
 
-    while (1)
-    {
-        if (AGC())
-        {
-            uint32_t currentMicLevel = 0;
-            if (MicrophoneVolume(currentMicLevel) == 0)
-            {
-                // This doesn't set the system volume, just stores it.
-                _Lock();
-                if (_ptrAudioBuffer)
-                {
-                    _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
-                }
-                _UnLock();
-            }
-        }
-
-        DWORD waitResult = WaitForSingleObject(waitObject,
-                                               GET_MIC_VOLUME_INTERVAL_MS);
-        switch (waitResult)
-        {
-            case WAIT_OBJECT_0: // _hShutdownCaptureEvent
-                return 0;
-            case WAIT_TIMEOUT:  // timeout notification
-                break;
-            default:            // unexpected error
-                LOG(LS_WARNING)
-                    << "unknown wait termination on get volume thread";
-                return 1;
-        }
-    }
-}
-
-DWORD AudioDeviceWindowsCore::DoSetCaptureVolumeThread()
-{
-    HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hSetCaptureVolumeEvent};
-
-    while (1)
-    {
-        DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, INFINITE);
-        switch (waitResult)
-        {
-            case WAIT_OBJECT_0:      // _hShutdownCaptureEvent
-                return 0;
-            case WAIT_OBJECT_0 + 1:  // _hSetCaptureVolumeEvent
-                break;
-            default:                 // unexpected error
-                LOG(LS_WARNING)
-                    << "unknown wait termination on set volume thread";
-                    return 1;
-        }
-
+  while (1) {
+    if (AGC()) {
+      uint32_t currentMicLevel = 0;
+      if (MicrophoneVolume(currentMicLevel) == 0) {
+        // This doesn't set the system volume, just stores it.
         _Lock();
-        uint32_t newMicLevel = _newMicLevel;
-        _UnLock();
-
-        if (SetMicrophoneVolume(newMicLevel) == -1)
-        {
-            LOG(LS_WARNING)
-                << "the required modification of the microphone volume failed";
+        if (_ptrAudioBuffer) {
+          _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
         }
+        _UnLock();
+      }
     }
+
+    DWORD waitResult =
+        WaitForSingleObject(waitObject, GET_MIC_VOLUME_INTERVAL_MS);
+    switch (waitResult) {
+      case WAIT_OBJECT_0:  // _hShutdownCaptureEvent
+        return 0;
+      case WAIT_TIMEOUT:  // timeout notification
+        break;
+      default:  // unexpected error
+        LOG(LS_WARNING) << "unknown wait termination on get volume thread";
+        return 1;
+    }
+  }
+}
+
+DWORD AudioDeviceWindowsCore::DoSetCaptureVolumeThread() {
+  HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hSetCaptureVolumeEvent};
+
+  while (1) {
+    DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, INFINITE);
+    switch (waitResult) {
+      case WAIT_OBJECT_0:  // _hShutdownCaptureEvent
+        return 0;
+      case WAIT_OBJECT_0 + 1:  // _hSetCaptureVolumeEvent
+        break;
+      default:  // unexpected error
+        LOG(LS_WARNING) << "unknown wait termination on set volume thread";
+        return 1;
+    }
+
+    _Lock();
+    uint32_t newMicLevel = _newMicLevel;
+    _UnLock();
+
+    if (SetMicrophoneVolume(newMicLevel) == -1) {
+      LOG(LS_WARNING)
+          << "the required modification of the microphone volume failed";
+    }
+  }
 }
 
 // ----------------------------------------------------------------------------
 //  DoRenderThread
 // ----------------------------------------------------------------------------
 
-DWORD AudioDeviceWindowsCore::DoRenderThread()
-{
+DWORD AudioDeviceWindowsCore::DoRenderThread() {
+  bool keepPlaying = true;
+  HANDLE waitArray[2] = {_hShutdownRenderEvent, _hRenderSamplesReadyEvent};
+  HRESULT hr = S_OK;
+  HANDLE hMmTask = NULL;
 
-    bool keepPlaying = true;
-    HANDLE waitArray[2] = {_hShutdownRenderEvent, _hRenderSamplesReadyEvent};
-    HRESULT hr = S_OK;
-    HANDLE hMmTask = NULL;
+  // Initialize COM as MTA in this thread.
+  ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+  if (!comInit.succeeded()) {
+    LOG(LS_ERROR) << "failed to initialize COM in render thread";
+    return 1;
+  }
 
-    // Initialize COM as MTA in this thread.
-    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
-    if (!comInit.succeeded()) {
-      LOG(LS_ERROR) << "failed to initialize COM in render thread";
-      return 1;
+  rtc::SetCurrentThreadName("webrtc_core_audio_render_thread");
+
+  // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
+  // priority.
+  //
+  if (_winSupportAvrt) {
+    DWORD taskIndex(0);
+    hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
+    if (hMmTask) {
+      if (FALSE == _PAvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL)) {
+        LOG(LS_WARNING) << "failed to boost play-thread using MMCSS";
+      }
+      LOG(LS_VERBOSE)
+          << "render thread is now registered with MMCSS (taskIndex="
+          << taskIndex << ")";
+    } else {
+      LOG(LS_WARNING) << "failed to enable MMCSS on render thread (err="
+                      << GetLastError() << ")";
+      _TraceCOMError(GetLastError());
+    }
+  }
+
+  _Lock();
+
+  IAudioClock* clock = NULL;
+
+  // Get size of rendering buffer (length is expressed as the number of audio
+  // frames the buffer can hold). This value is fixed during the rendering
+  // session.
+  //
+  UINT32 bufferLength = 0;
+  hr = _ptrClientOut->GetBufferSize(&bufferLength);
+  EXIT_ON_ERROR(hr);
+  LOG(LS_VERBOSE) << "[REND] size of buffer       : " << bufferLength;
+
+  // Get maximum latency for the current stream (will not change for the
+  // lifetime of the IAudioClient object).
+  //
+  REFERENCE_TIME latency;
+  _ptrClientOut->GetStreamLatency(&latency);
+  LOG(LS_VERBOSE) << "[REND] max stream latency   : " << (DWORD)latency << " ("
+                  << (double)(latency / 10000.0) << " ms)";
+
+  // Get the length of the periodic interval separating successive processing
+  // passes by the audio engine on the data in the endpoint buffer.
+  //
+  // The period between processing passes by the audio engine is fixed for a
+  // particular audio endpoint device and represents the smallest processing
+  // quantum for the audio engine. This period plus the stream latency between
+  // the buffer and endpoint device represents the minimum possible latency that
+  // an audio application can achieve. Typical value: 100000 <=> 0.01 sec =
+  // 10ms.
+  //
+  REFERENCE_TIME devPeriod = 0;
+  REFERENCE_TIME devPeriodMin = 0;
+  _ptrClientOut->GetDevicePeriod(&devPeriod, &devPeriodMin);
+  LOG(LS_VERBOSE) << "[REND] device period        : " << (DWORD)devPeriod
+                  << " (" << (double)(devPeriod / 10000.0) << " ms)";
+
+  // Derive initial rendering delay.
+  // Example: 10*(960/480) + 15 = 20 + 15 = 35ms
+  //
+  int playout_delay = 10 * (bufferLength / _playBlockSizeInFrames) +
+                      (int)((latency + devPeriod) / 10000);
+  _sndCardPlayDelay = playout_delay;
+  _writtenSamples = 0;
+  LOG(LS_VERBOSE) << "[REND] initial delay        : " << playout_delay;
+
+  double endpointBufferSizeMS =
+      10.0 * ((double)bufferLength / (double)_devicePlayBlockSize);
+  LOG(LS_VERBOSE) << "[REND] endpointBufferSizeMS : " << endpointBufferSizeMS;
+
+  // Before starting the stream, fill the rendering buffer with silence.
+  //
+  BYTE* pData = NULL;
+  hr = _ptrRenderClient->GetBuffer(bufferLength, &pData);
+  EXIT_ON_ERROR(hr);
+
+  hr =
+      _ptrRenderClient->ReleaseBuffer(bufferLength, AUDCLNT_BUFFERFLAGS_SILENT);
+  EXIT_ON_ERROR(hr);
+
+  _writtenSamples += bufferLength;
+
+  hr = _ptrClientOut->GetService(__uuidof(IAudioClock), (void**)&clock);
+  if (FAILED(hr)) {
+    LOG(LS_WARNING)
+        << "failed to get IAudioClock interface from the IAudioClient";
+  }
+
+  // Start up the rendering audio stream.
+  hr = _ptrClientOut->Start();
+  EXIT_ON_ERROR(hr);
+
+  _UnLock();
+
+  // Set event which will ensure that the calling thread modifies the playing
+  // state to true.
+  //
+  SetEvent(_hRenderStartedEvent);
+
+  // >> ------------------ THREAD LOOP ------------------
+
+  while (keepPlaying) {
+    // Wait for a render notification event or a shutdown event
+    DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
+    switch (waitResult) {
+      case WAIT_OBJECT_0 + 0:  // _hShutdownRenderEvent
+        keepPlaying = false;
+        break;
+      case WAIT_OBJECT_0 + 1:  // _hRenderSamplesReadyEvent
+        break;
+      case WAIT_TIMEOUT:  // timeout notification
+        LOG(LS_WARNING) << "render event timed out after 0.5 seconds";
+        goto Exit;
+      default:  // unexpected error
+        LOG(LS_WARNING) << "unknown wait termination on render side";
+        goto Exit;
     }
 
-    rtc::SetCurrentThreadName("webrtc_core_audio_render_thread");
+    while (keepPlaying) {
+      _Lock();
 
-    // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread priority.
-    //
-    if (_winSupportAvrt)
-    {
-        DWORD taskIndex(0);
-        hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
-        if (hMmTask)
-        {
-            if (FALSE == _PAvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL))
-            {
-                LOG(LS_WARNING) << "failed to boost play-thread using MMCSS";
-            }
-            LOG(LS_VERBOSE)
-                << "render thread is now registered with MMCSS (taskIndex="
-                << taskIndex << ")";
-        }
-        else
-        {
-            LOG(LS_WARNING) << "failed to enable MMCSS on render thread (err="
-                            << GetLastError() << ")";
-            _TraceCOMError(GetLastError());
-        }
-    }
+      // Sanity check to ensure that essential states are not modified
+      // during the unlocked period.
+      if (_ptrRenderClient == NULL || _ptrClientOut == NULL) {
+        _UnLock();
+        LOG(LS_ERROR)
+            << "output state has been modified during unlocked period";
+        goto Exit;
+      }
 
-    _Lock();
+      // Get the number of frames of padding (queued up to play) in the endpoint
+      // buffer.
+      UINT32 padding = 0;
+      hr = _ptrClientOut->GetCurrentPadding(&padding);
+      EXIT_ON_ERROR(hr);
 
-    IAudioClock* clock = NULL;
+      // Derive the amount of available space in the output buffer
+      uint32_t framesAvailable = bufferLength - padding;
 
-    // Get size of rendering buffer (length is expressed as the number of audio frames the buffer can hold).
-    // This value is fixed during the rendering session.
-    //
-    UINT32 bufferLength = 0;
-    hr = _ptrClientOut->GetBufferSize(&bufferLength);
-    EXIT_ON_ERROR(hr);
-    LOG(LS_VERBOSE) << "[REND] size of buffer       : " << bufferLength;
+      // Do we have 10 ms available in the render buffer?
+      if (framesAvailable < _playBlockSizeInFrames) {
+        // Not enough space in render buffer to store next render packet.
+        _UnLock();
+        break;
+      }
 
-    // Get maximum latency for the current stream (will not change for the lifetime  of the IAudioClient object).
-    //
-    REFERENCE_TIME latency;
-    _ptrClientOut->GetStreamLatency(&latency);
-    LOG(LS_VERBOSE) << "[REND] max stream latency   : " << (DWORD)latency
-                    << " (" << (double)(latency/10000.0) << " ms)";
+      // Write n*10ms buffers to the render buffer
+      const uint32_t n10msBuffers = (framesAvailable / _playBlockSizeInFrames);
+      for (uint32_t n = 0; n < n10msBuffers; n++) {
+        // Get pointer (i.e., grab the buffer) to next space in the shared
+        // render buffer.
+        hr = _ptrRenderClient->GetBuffer(_playBlockSizeInFrames, &pData);
+        EXIT_ON_ERROR(hr);
 
-    // Get the length of the periodic interval separating successive processing passes by
-    // the audio engine on the data in the endpoint buffer.
-    //
-    // The period between processing passes by the audio engine is fixed for a particular
-    // audio endpoint device and represents the smallest processing quantum for the audio engine.
-    // This period plus the stream latency between the buffer and endpoint device represents
-    // the minimum possible latency that an audio application can achieve.
-    // Typical value: 100000 <=> 0.01 sec = 10ms.
-    //
-    REFERENCE_TIME devPeriod = 0;
-    REFERENCE_TIME devPeriodMin = 0;
-    _ptrClientOut->GetDevicePeriod(&devPeriod, &devPeriodMin);
-    LOG(LS_VERBOSE) << "[REND] device period        : " << (DWORD)devPeriod
-                    << " (" << (double)(devPeriod/10000.0) << " ms)";
+        if (_ptrAudioBuffer) {
+          // Request data to be played out (#bytes =
+          // _playBlockSizeInFrames*_audioFrameSize)
+          _UnLock();
+          int32_t nSamples =
+              _ptrAudioBuffer->RequestPlayoutData(_playBlockSizeInFrames);
+          _Lock();
 
-    // Derive initial rendering delay.
-    // Example: 10*(960/480) + 15 = 20 + 15 = 35ms
-    //
-    int playout_delay = 10 * (bufferLength / _playBlockSizeInFrames) +
-                        (int)((latency + devPeriod) / 10000);
-    _sndCardPlayDelay = playout_delay;
-    _writtenSamples = 0;
-    LOG(LS_VERBOSE) << "[REND] initial delay        : " << playout_delay;
-
-    double endpointBufferSizeMS = 10.0 * ((double)bufferLength / (double)_devicePlayBlockSize);
-    LOG(LS_VERBOSE) << "[REND] endpointBufferSizeMS : " << endpointBufferSizeMS;
-
-    // Before starting the stream, fill the rendering buffer with silence.
-    //
-    BYTE *pData = NULL;
-    hr = _ptrRenderClient->GetBuffer(bufferLength, &pData);
-    EXIT_ON_ERROR(hr);
-
-    hr = _ptrRenderClient->ReleaseBuffer(bufferLength, AUDCLNT_BUFFERFLAGS_SILENT);
-    EXIT_ON_ERROR(hr);
-
-    _writtenSamples += bufferLength;
-
-    hr = _ptrClientOut->GetService(__uuidof(IAudioClock), (void**)&clock);
-    if (FAILED(hr)) {
-      LOG(LS_WARNING)
-          << "failed to get IAudioClock interface from the IAudioClient";
-    }
-
-    // Start up the rendering audio stream.
-    hr = _ptrClientOut->Start();
-    EXIT_ON_ERROR(hr);
-
-    _UnLock();
-
-    // Set event which will ensure that the calling thread modifies the playing state to true.
-    //
-    SetEvent(_hRenderStartedEvent);
-
-    // >> ------------------ THREAD LOOP ------------------
-
-    while (keepPlaying)
-    {
-        // Wait for a render notification event or a shutdown event
-        DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
-        switch (waitResult)
-        {
-        case WAIT_OBJECT_0 + 0:     // _hShutdownRenderEvent
-            keepPlaying = false;
-            break;
-        case WAIT_OBJECT_0 + 1:     // _hRenderSamplesReadyEvent
-            break;
-        case WAIT_TIMEOUT:          // timeout notification
-            LOG(LS_WARNING) << "render event timed out after 0.5 seconds";
-            goto Exit;
-        default:                    // unexpected error
-            LOG(LS_WARNING) << "unknown wait termination on render side";
-            goto Exit;
-        }
-
-        while (keepPlaying)
-        {
-            _Lock();
-
-            // Sanity check to ensure that essential states are not modified
-            // during the unlocked period.
-            if (_ptrRenderClient == NULL || _ptrClientOut == NULL)
-            {
-                _UnLock();
-                LOG(LS_ERROR)
-                    << "output state has been modified during unlocked period";
-                goto Exit;
-            }
-
-            // Get the number of frames of padding (queued up to play) in the endpoint buffer.
-            UINT32 padding = 0;
-            hr = _ptrClientOut->GetCurrentPadding(&padding);
-            EXIT_ON_ERROR(hr);
-
-            // Derive the amount of available space in the output buffer
-            uint32_t framesAvailable = bufferLength - padding;
-
-            // Do we have 10 ms available in the render buffer?
-            if (framesAvailable < _playBlockSizeInFrames) {
-              // Not enough space in render buffer to store next render packet.
-              _UnLock();
-              break;
-            }
-
-            // Write n*10ms buffers to the render buffer
-            const uint32_t n10msBuffers =
-                (framesAvailable / _playBlockSizeInFrames);
-            for (uint32_t n = 0; n < n10msBuffers; n++)
-            {
-                // Get pointer (i.e., grab the buffer) to next space in the shared render buffer.
-                hr =
-                    _ptrRenderClient->GetBuffer(_playBlockSizeInFrames, &pData);
-                EXIT_ON_ERROR(hr);
-
-                if (_ptrAudioBuffer)
-                {
-                  // Request data to be played out (#bytes =
-                  // _playBlockSizeInFrames*_audioFrameSize)
-                  _UnLock();
-                  int32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(
-                      _playBlockSizeInFrames);
-                  _Lock();
-
-                  if (nSamples == -1) {
-                    _UnLock();
-                    LOG(LS_ERROR) << "failed to read data from render client";
-                    goto Exit;
-                    }
-
-                    // Sanity check to ensure that essential states are not modified during the unlocked period
-                    if (_ptrRenderClient == NULL || _ptrClientOut == NULL)
-                    {
-                        _UnLock();
-                        LOG(LS_ERROR)
-                            << "output state has been modified during unlocked"
-                            << " period";
-                        goto Exit;
-                    }
-                    if (nSamples !=
-                        static_cast<int32_t>(_playBlockSizeInSamples)) {
-                      LOG(LS_WARNING)
-                          << "nSamples(" << nSamples
-                          << ") != _playBlockSizeInSamples("
-                          << _playBlockSizeInSamples << ")";
-                    }
-
-                    // Get the actual (stored) data
-                    nSamples = _ptrAudioBuffer->GetPlayoutData((int8_t*)pData);
-                }
-
-                DWORD dwFlags(0);
-                hr = _ptrRenderClient->ReleaseBuffer(_playBlockSizeInFrames,
-                                                     dwFlags);
-                // See http://msdn.microsoft.com/en-us/library/dd316605(VS.85).aspx
-                // for more details regarding AUDCLNT_E_DEVICE_INVALIDATED.
-                EXIT_ON_ERROR(hr);
-
-                _writtenSamples += _playBlockSizeInFrames;
-            }
-
-            // Check the current delay on the playout side.
-            if (clock) {
-              UINT64 pos = 0;
-              UINT64 freq = 1;
-              clock->GetPosition(&pos, NULL);
-              clock->GetFrequency(&freq);
-              playout_delay = ROUND((double(_writtenSamples) /
-                  _devicePlaySampleRate - double(pos) / freq) * 1000.0);
-              _sndCardPlayDelay = playout_delay;
-            }
-
+          if (nSamples == -1) {
             _UnLock();
+            LOG(LS_ERROR) << "failed to read data from render client";
+            goto Exit;
+          }
+
+          // Sanity check to ensure that essential states are not modified
+          // during the unlocked period
+          if (_ptrRenderClient == NULL || _ptrClientOut == NULL) {
+            _UnLock();
+            LOG(LS_ERROR) << "output state has been modified during unlocked"
+                          << " period";
+            goto Exit;
+          }
+          if (nSamples != static_cast<int32_t>(_playBlockSizeInSamples)) {
+            LOG(LS_WARNING)
+                << "nSamples(" << nSamples << ") != _playBlockSizeInSamples("
+                << _playBlockSizeInSamples << ")";
+          }
+
+          // Get the actual (stored) data
+          nSamples = _ptrAudioBuffer->GetPlayoutData((int8_t*)pData);
         }
+
+        DWORD dwFlags(0);
+        hr = _ptrRenderClient->ReleaseBuffer(_playBlockSizeInFrames, dwFlags);
+        // See http://msdn.microsoft.com/en-us/library/dd316605(VS.85).aspx
+        // for more details regarding AUDCLNT_E_DEVICE_INVALIDATED.
+        EXIT_ON_ERROR(hr);
+
+        _writtenSamples += _playBlockSizeInFrames;
+      }
+
+      // Check the current delay on the playout side.
+      if (clock) {
+        UINT64 pos = 0;
+        UINT64 freq = 1;
+        clock->GetPosition(&pos, NULL);
+        clock->GetFrequency(&freq);
+        playout_delay = ROUND((double(_writtenSamples) / _devicePlaySampleRate -
+                               double(pos) / freq) *
+                              1000.0);
+        _sndCardPlayDelay = playout_delay;
+      }
+
+      _UnLock();
     }
+  }
 
-    // ------------------ THREAD LOOP ------------------ <<
+  // ------------------ THREAD LOOP ------------------ <<
 
-    SleepMs(static_cast<DWORD>(endpointBufferSizeMS+0.5));
-    hr = _ptrClientOut->Stop();
+  SleepMs(static_cast<DWORD>(endpointBufferSizeMS + 0.5));
+  hr = _ptrClientOut->Stop();
 
 Exit:
-    SAFE_RELEASE(clock);
+  SAFE_RELEASE(clock);
 
-    if (FAILED(hr))
-    {
-        _ptrClientOut->Stop();
-        _UnLock();
-        _TraceCOMError(hr);
-    }
-
-    if (_winSupportAvrt)
-    {
-        if (NULL != hMmTask)
-        {
-            _PAvRevertMmThreadCharacteristics(hMmTask);
-        }
-    }
-
-    _Lock();
-
-    if (keepPlaying)
-    {
-        if (_ptrClientOut != NULL)
-        {
-            hr = _ptrClientOut->Stop();
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-            }
-            hr = _ptrClientOut->Reset();
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-            }
-        }
-        LOG(LS_ERROR)
-            << "Playout error: rendering thread has ended pre-maturely";
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "_Rendering thread is now terminated properly";
-    }
-
+  if (FAILED(hr)) {
+    _ptrClientOut->Stop();
     _UnLock();
+    _TraceCOMError(hr);
+  }
 
-    return (DWORD)hr;
+  if (_winSupportAvrt) {
+    if (NULL != hMmTask) {
+      _PAvRevertMmThreadCharacteristics(hMmTask);
+    }
+  }
+
+  _Lock();
+
+  if (keepPlaying) {
+    if (_ptrClientOut != NULL) {
+      hr = _ptrClientOut->Stop();
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+      }
+      hr = _ptrClientOut->Reset();
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+      }
+    }
+    LOG(LS_ERROR) << "Playout error: rendering thread has ended pre-maturely";
+  } else {
+    LOG(LS_VERBOSE) << "_Rendering thread is now terminated properly";
+  }
+
+  _UnLock();
+
+  return (DWORD)hr;
 }
 
-DWORD AudioDeviceWindowsCore::InitCaptureThreadPriority()
-{
-    _hMmTask = NULL;
+DWORD AudioDeviceWindowsCore::InitCaptureThreadPriority() {
+  _hMmTask = NULL;
 
-    rtc::SetCurrentThreadName("webrtc_core_audio_capture_thread");
+  rtc::SetCurrentThreadName("webrtc_core_audio_capture_thread");
 
-    // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
-    // priority.
-    if (_winSupportAvrt)
-    {
-        DWORD taskIndex(0);
-        _hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
-        if (_hMmTask)
-        {
-            if (!_PAvSetMmThreadPriority(_hMmTask, AVRT_PRIORITY_CRITICAL))
-            {
-                LOG(LS_WARNING) << "failed to boost rec-thread using MMCSS";
-            }
-            LOG(LS_VERBOSE)
-                << "capture thread is now registered with MMCSS (taskIndex="
-                << taskIndex << ")";
-        }
-        else
-        {
-            LOG(LS_WARNING) << "failed to enable MMCSS on capture thread (err="
-                            << GetLastError() << ")";
-            _TraceCOMError(GetLastError());
-        }
+  // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
+  // priority.
+  if (_winSupportAvrt) {
+    DWORD taskIndex(0);
+    _hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
+    if (_hMmTask) {
+      if (!_PAvSetMmThreadPriority(_hMmTask, AVRT_PRIORITY_CRITICAL)) {
+        LOG(LS_WARNING) << "failed to boost rec-thread using MMCSS";
+      }
+      LOG(LS_VERBOSE)
+          << "capture thread is now registered with MMCSS (taskIndex="
+          << taskIndex << ")";
+    } else {
+      LOG(LS_WARNING) << "failed to enable MMCSS on capture thread (err="
+                      << GetLastError() << ")";
+      _TraceCOMError(GetLastError());
     }
+  }
 
-    return S_OK;
+  return S_OK;
 }
 
-void AudioDeviceWindowsCore::RevertCaptureThreadPriority()
-{
-    if (_winSupportAvrt)
-    {
-        if (NULL != _hMmTask)
-        {
-            _PAvRevertMmThreadCharacteristics(_hMmTask);
-        }
+void AudioDeviceWindowsCore::RevertCaptureThreadPriority() {
+  if (_winSupportAvrt) {
+    if (NULL != _hMmTask) {
+      _PAvRevertMmThreadCharacteristics(_hMmTask);
     }
+  }
 
-    _hMmTask = NULL;
+  _hMmTask = NULL;
 }
 
-DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO()
-{
-    assert(_mediaBuffer != NULL);
-    bool keepRecording = true;
+DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() {
+  assert(_mediaBuffer != NULL);
+  bool keepRecording = true;
 
-    // Initialize COM as MTA in this thread.
-    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
-    if (!comInit.succeeded()) {
-      LOG(LS_ERROR) << "failed to initialize COM in polling DMO thread";
-      return 1;
-    }
+  // Initialize COM as MTA in this thread.
+  ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+  if (!comInit.succeeded()) {
+    LOG(LS_ERROR) << "failed to initialize COM in polling DMO thread";
+    return 1;
+  }
 
-    HRESULT hr = InitCaptureThreadPriority();
-    if (FAILED(hr))
-    {
-        return hr;
-    }
-
-    // Set event which will ensure that the calling thread modifies the
-    // recording state to true.
-    SetEvent(_hCaptureStartedEvent);
-
-    // >> ---------------------------- THREAD LOOP ----------------------------
-    while (keepRecording)
-    {
-        // Poll the DMO every 5 ms.
-        // (The same interval used in the Wave implementation.)
-        DWORD waitResult = WaitForSingleObject(_hShutdownCaptureEvent, 5);
-        switch (waitResult)
-        {
-        case WAIT_OBJECT_0:         // _hShutdownCaptureEvent
-            keepRecording = false;
-            break;
-        case WAIT_TIMEOUT:          // timeout notification
-            break;
-        default:                    // unexpected error
-            LOG(LS_WARNING) << "Unknown wait termination on capture side";
-            hr = -1; // To signal an error callback.
-            keepRecording = false;
-            break;
-        }
-
-        while (keepRecording)
-        {
-            rtc::CritScope critScoped(&_critSect);
-
-            DWORD dwStatus = 0;
-            {
-                DMO_OUTPUT_DATA_BUFFER dmoBuffer = {0};
-                dmoBuffer.pBuffer = _mediaBuffer;
-                dmoBuffer.pBuffer->AddRef();
-
-                // Poll the DMO for AEC processed capture data. The DMO will
-                // copy available data to |dmoBuffer|, and should only return
-                // 10 ms frames. The value of |dwStatus| should be ignored.
-                hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus);
-                SAFE_RELEASE(dmoBuffer.pBuffer);
-                dwStatus = dmoBuffer.dwStatus;
-            }
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-                keepRecording = false;
-                assert(false);
-                break;
-            }
-
-            ULONG bytesProduced = 0;
-            BYTE* data;
-            // Get a pointer to the data buffer. This should be valid until
-            // the next call to ProcessOutput.
-            hr = _mediaBuffer->GetBufferAndLength(&data, &bytesProduced);
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-                keepRecording = false;
-                assert(false);
-                break;
-            }
-
-            // TODO(andrew): handle AGC.
-
-            if (bytesProduced > 0)
-            {
-                const int kSamplesProduced = bytesProduced / _recAudioFrameSize;
-                // TODO(andrew): verify that this is always satisfied. It might
-                // be that ProcessOutput will try to return more than 10 ms if
-                // we fail to call it frequently enough.
-                assert(kSamplesProduced == static_cast<int>(_recBlockSize));
-                assert(sizeof(BYTE) == sizeof(int8_t));
-                _ptrAudioBuffer->SetRecordedBuffer(
-                    reinterpret_cast<int8_t*>(data),
-                    kSamplesProduced);
-                _ptrAudioBuffer->SetVQEData(0, 0, 0);
-
-                _UnLock();  // Release lock while making the callback.
-                _ptrAudioBuffer->DeliverRecordedData();
-                _Lock();
-            }
-
-            // Reset length to indicate buffer availability.
-            hr = _mediaBuffer->SetLength(0);
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-                keepRecording = false;
-                assert(false);
-                break;
-            }
-
-            if (!(dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE))
-            {
-                // The DMO cannot currently produce more data. This is the
-                // normal case; otherwise it means the DMO had more than 10 ms
-                // of data available and ProcessOutput should be called again.
-                break;
-            }
-        }
-    }
-    // ---------------------------- THREAD LOOP ---------------------------- <<
-
-    RevertCaptureThreadPriority();
-
-    if (FAILED(hr))
-    {
-        LOG(LS_ERROR)
-            << "Recording error: capturing thread has ended prematurely";
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "Capturing thread is now terminated properly";
-    }
-
+  HRESULT hr = InitCaptureThreadPriority();
+  if (FAILED(hr)) {
     return hr;
-}
+  }
 
+  // Set event which will ensure that the calling thread modifies the
+  // recording state to true.
+  SetEvent(_hCaptureStartedEvent);
+
+  // >> ---------------------------- THREAD LOOP ----------------------------
+  while (keepRecording) {
+    // Poll the DMO every 5 ms.
+    // (The same interval used in the Wave implementation.)
+    DWORD waitResult = WaitForSingleObject(_hShutdownCaptureEvent, 5);
+    switch (waitResult) {
+      case WAIT_OBJECT_0:  // _hShutdownCaptureEvent
+        keepRecording = false;
+        break;
+      case WAIT_TIMEOUT:  // timeout notification
+        break;
+      default:  // unexpected error
+        LOG(LS_WARNING) << "Unknown wait termination on capture side";
+        hr = -1;  // To signal an error callback.
+        keepRecording = false;
+        break;
+    }
+
+    while (keepRecording) {
+      rtc::CritScope critScoped(&_critSect);
+
+      DWORD dwStatus = 0;
+      {
+        DMO_OUTPUT_DATA_BUFFER dmoBuffer = {0};
+        dmoBuffer.pBuffer = _mediaBuffer;
+        dmoBuffer.pBuffer->AddRef();
+
+        // Poll the DMO for AEC processed capture data. The DMO will
+        // copy available data to |dmoBuffer|, and should only return
+        // 10 ms frames. The value of |dwStatus| should be ignored.
+        hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus);
+        SAFE_RELEASE(dmoBuffer.pBuffer);
+        dwStatus = dmoBuffer.dwStatus;
+      }
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+        keepRecording = false;
+        assert(false);
+        break;
+      }
+
+      ULONG bytesProduced = 0;
+      BYTE* data;
+      // Get a pointer to the data buffer. This should be valid until
+      // the next call to ProcessOutput.
+      hr = _mediaBuffer->GetBufferAndLength(&data, &bytesProduced);
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+        keepRecording = false;
+        assert(false);
+        break;
+      }
+
+      // TODO(andrew): handle AGC.
+
+      if (bytesProduced > 0) {
+        const int kSamplesProduced = bytesProduced / _recAudioFrameSize;
+        // TODO(andrew): verify that this is always satisfied. It might
+        // be that ProcessOutput will try to return more than 10 ms if
+        // we fail to call it frequently enough.
+        assert(kSamplesProduced == static_cast<int>(_recBlockSize));
+        assert(sizeof(BYTE) == sizeof(int8_t));
+        _ptrAudioBuffer->SetRecordedBuffer(reinterpret_cast<int8_t*>(data),
+                                           kSamplesProduced);
+        _ptrAudioBuffer->SetVQEData(0, 0, 0);
+
+        _UnLock();  // Release lock while making the callback.
+        _ptrAudioBuffer->DeliverRecordedData();
+        _Lock();
+      }
+
+      // Reset length to indicate buffer availability.
+      hr = _mediaBuffer->SetLength(0);
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+        keepRecording = false;
+        assert(false);
+        break;
+      }
+
+      if (!(dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE)) {
+        // The DMO cannot currently produce more data. This is the
+        // normal case; otherwise it means the DMO had more than 10 ms
+        // of data available and ProcessOutput should be called again.
+        break;
+      }
+    }
+  }
+  // ---------------------------- THREAD LOOP ---------------------------- <<
+
+  RevertCaptureThreadPriority();
+
+  if (FAILED(hr)) {
+    LOG(LS_ERROR) << "Recording error: capturing thread has ended prematurely";
+  } else {
+    LOG(LS_VERBOSE) << "Capturing thread is now terminated properly";
+  }
+
+  return hr;
+}
 
 // ----------------------------------------------------------------------------
 //  DoCaptureThread
 // ----------------------------------------------------------------------------
 
-DWORD AudioDeviceWindowsCore::DoCaptureThread()
-{
+DWORD AudioDeviceWindowsCore::DoCaptureThread() {
+  bool keepRecording = true;
+  HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hCaptureSamplesReadyEvent};
+  HRESULT hr = S_OK;
 
-    bool keepRecording = true;
-    HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hCaptureSamplesReadyEvent};
-    HRESULT hr = S_OK;
+  LARGE_INTEGER t1;
 
-    LARGE_INTEGER t1;
+  BYTE* syncBuffer = NULL;
+  UINT32 syncBufIndex = 0;
 
-    BYTE* syncBuffer = NULL;
-    UINT32 syncBufIndex = 0;
+  _readSamples = 0;
 
-    _readSamples = 0;
+  // Initialize COM as MTA in this thread.
+  ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+  if (!comInit.succeeded()) {
+    LOG(LS_ERROR) << "failed to initialize COM in capture thread";
+    return 1;
+  }
 
-    // Initialize COM as MTA in this thread.
-    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
-    if (!comInit.succeeded()) {
-      LOG(LS_ERROR) << "failed to initialize COM in capture thread";
-      return 1;
+  hr = InitCaptureThreadPriority();
+  if (FAILED(hr)) {
+    return hr;
+  }
+
+  _Lock();
+
+  // Get size of capturing buffer (length is expressed as the number of audio
+  // frames the buffer can hold). This value is fixed during the capturing
+  // session.
+  //
+  UINT32 bufferLength = 0;
+  if (_ptrClientIn == NULL) {
+    LOG(LS_ERROR)
+        << "input state has been modified before capture loop starts.";
+    return 1;
+  }
+  hr = _ptrClientIn->GetBufferSize(&bufferLength);
+  EXIT_ON_ERROR(hr);
+  LOG(LS_VERBOSE) << "[CAPT] size of buffer       : " << bufferLength;
+
+  // Allocate memory for sync buffer.
+  // It is used for compensation between native 44.1 and internal 44.0 and
+  // for cases when the capture buffer is larger than 10ms.
+  //
+  const UINT32 syncBufferSize = 2 * (bufferLength * _recAudioFrameSize);
+  syncBuffer = new BYTE[syncBufferSize];
+  if (syncBuffer == NULL) {
+    return (DWORD)E_POINTER;
+  }
+  LOG(LS_VERBOSE) << "[CAPT] size of sync buffer  : " << syncBufferSize
+                  << " [bytes]";
+
+  // Get maximum latency for the current stream (will not change for the
+  // lifetime of the IAudioClient object).
+  //
+  REFERENCE_TIME latency;
+  _ptrClientIn->GetStreamLatency(&latency);
+  LOG(LS_VERBOSE) << "[CAPT] max stream latency   : " << (DWORD)latency << " ("
+                  << (double)(latency / 10000.0) << " ms)";
+
+  // Get the length of the periodic interval separating successive processing
+  // passes by the audio engine on the data in the endpoint buffer.
+  //
+  REFERENCE_TIME devPeriod = 0;
+  REFERENCE_TIME devPeriodMin = 0;
+  _ptrClientIn->GetDevicePeriod(&devPeriod, &devPeriodMin);
+  LOG(LS_VERBOSE) << "[CAPT] device period        : " << (DWORD)devPeriod
+                  << " (" << (double)(devPeriod / 10000.0) << " ms)";
+
+  double extraDelayMS = (double)((latency + devPeriod) / 10000.0);
+  LOG(LS_VERBOSE) << "[CAPT] extraDelayMS         : " << extraDelayMS;
+
+  double endpointBufferSizeMS =
+      10.0 * ((double)bufferLength / (double)_recBlockSize);
+  LOG(LS_VERBOSE) << "[CAPT] endpointBufferSizeMS : " << endpointBufferSizeMS;
+
+  // Start up the capturing stream.
+  //
+  hr = _ptrClientIn->Start();
+  EXIT_ON_ERROR(hr);
+
+  _UnLock();
+
+  // Set event which will ensure that the calling thread modifies the recording
+  // state to true.
+  //
+  SetEvent(_hCaptureStartedEvent);
+
+  // >> ---------------------------- THREAD LOOP ----------------------------
+
+  while (keepRecording) {
+    // Wait for a capture notification event or a shutdown event
+    DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
+    switch (waitResult) {
+      case WAIT_OBJECT_0 + 0:  // _hShutdownCaptureEvent
+        keepRecording = false;
+        break;
+      case WAIT_OBJECT_0 + 1:  // _hCaptureSamplesReadyEvent
+        break;
+      case WAIT_TIMEOUT:  // timeout notification
+        LOG(LS_WARNING) << "capture event timed out after 0.5 seconds";
+        goto Exit;
+      default:  // unexpected error
+        LOG(LS_WARNING) << "unknown wait termination on capture side";
+        goto Exit;
     }
 
-    hr = InitCaptureThreadPriority();
-    if (FAILED(hr))
-    {
-        return hr;
-    }
+    while (keepRecording) {
+      BYTE* pData = 0;
+      UINT32 framesAvailable = 0;
+      DWORD flags = 0;
+      UINT64 recTime = 0;
+      UINT64 recPos = 0;
 
-    _Lock();
+      _Lock();
 
-    // Get size of capturing buffer (length is expressed as the number of audio frames the buffer can hold).
-    // This value is fixed during the capturing session.
-    //
-    UINT32 bufferLength = 0;
-    if (_ptrClientIn == NULL)
-    {
-      LOG(LS_ERROR)
-          << "input state has been modified before capture loop starts.";
-      return 1;
-    }
-    hr = _ptrClientIn->GetBufferSize(&bufferLength);
-    EXIT_ON_ERROR(hr);
-    LOG(LS_VERBOSE) << "[CAPT] size of buffer       : " << bufferLength;
+      // Sanity check to ensure that essential states are not modified
+      // during the unlocked period.
+      if (_ptrCaptureClient == NULL || _ptrClientIn == NULL) {
+        _UnLock();
+        LOG(LS_ERROR) << "input state has been modified during unlocked period";
+        goto Exit;
+      }
 
-    // Allocate memory for sync buffer.
-    // It is used for compensation between native 44.1 and internal 44.0 and
-    // for cases when the capture buffer is larger than 10ms.
-    //
-    const UINT32 syncBufferSize = 2*(bufferLength * _recAudioFrameSize);
-    syncBuffer = new BYTE[syncBufferSize];
-    if (syncBuffer == NULL)
-    {
-        return (DWORD)E_POINTER;
-    }
-    LOG(LS_VERBOSE) << "[CAPT] size of sync buffer  : " << syncBufferSize
-                    << " [bytes]";
+      //  Find out how much capture data is available
+      //
+      hr = _ptrCaptureClient->GetBuffer(
+          &pData,            // packet which is ready to be read by used
+          &framesAvailable,  // #frames in the captured packet (can be zero)
+          &flags,            // support flags (check)
+          &recPos,    // device position of first audio frame in data packet
+          &recTime);  // value of performance counter at the time of recording
+                      // the first audio frame
 
-    // Get maximum latency for the current stream (will not change for the lifetime of the IAudioClient object).
-    //
-    REFERENCE_TIME latency;
-    _ptrClientIn->GetStreamLatency(&latency);
-    LOG(LS_VERBOSE) << "[CAPT] max stream latency   : " << (DWORD)latency
-                    << " (" << (double)(latency / 10000.0) << " ms)";
-
-    // Get the length of the periodic interval separating successive processing passes by
-    // the audio engine on the data in the endpoint buffer.
-    //
-    REFERENCE_TIME devPeriod = 0;
-    REFERENCE_TIME devPeriodMin = 0;
-    _ptrClientIn->GetDevicePeriod(&devPeriod, &devPeriodMin);
-    LOG(LS_VERBOSE) << "[CAPT] device period        : " << (DWORD)devPeriod
-                    << " (" << (double)(devPeriod / 10000.0) << " ms)";
-
-    double extraDelayMS = (double)((latency + devPeriod) / 10000.0);
-    LOG(LS_VERBOSE) << "[CAPT] extraDelayMS         : " << extraDelayMS;
-
-    double endpointBufferSizeMS = 10.0 * ((double)bufferLength / (double)_recBlockSize);
-    LOG(LS_VERBOSE) << "[CAPT] endpointBufferSizeMS : " << endpointBufferSizeMS;
-
-    // Start up the capturing stream.
-    //
-    hr = _ptrClientIn->Start();
-    EXIT_ON_ERROR(hr);
-
-    _UnLock();
-
-    // Set event which will ensure that the calling thread modifies the recording state to true.
-    //
-    SetEvent(_hCaptureStartedEvent);
-
-    // >> ---------------------------- THREAD LOOP ----------------------------
-
-    while (keepRecording)
-    {
-        // Wait for a capture notification event or a shutdown event
-        DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
-        switch (waitResult)
-        {
-        case WAIT_OBJECT_0 + 0:        // _hShutdownCaptureEvent
-            keepRecording = false;
-            break;
-        case WAIT_OBJECT_0 + 1:        // _hCaptureSamplesReadyEvent
-            break;
-        case WAIT_TIMEOUT:            // timeout notification
-            LOG(LS_WARNING) << "capture event timed out after 0.5 seconds";
-            goto Exit;
-        default:                    // unexpected error
-            LOG(LS_WARNING) << "unknown wait termination on capture side";
-            goto Exit;
+      if (SUCCEEDED(hr)) {
+        if (AUDCLNT_S_BUFFER_EMPTY == hr) {
+          // Buffer was empty => start waiting for a new capture notification
+          // event
+          _UnLock();
+          break;
         }
 
-        while (keepRecording)
-        {
-            BYTE *pData = 0;
-            UINT32 framesAvailable = 0;
-            DWORD flags = 0;
-            UINT64 recTime = 0;
-            UINT64 recPos = 0;
+        if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
+          // Treat all of the data in the packet as silence and ignore the
+          // actual data values.
+          LOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_SILENT";
+          pData = NULL;
+        }
 
-            _Lock();
+        assert(framesAvailable != 0);
+
+        if (pData) {
+          CopyMemory(&syncBuffer[syncBufIndex * _recAudioFrameSize], pData,
+                     framesAvailable * _recAudioFrameSize);
+        } else {
+          ZeroMemory(&syncBuffer[syncBufIndex * _recAudioFrameSize],
+                     framesAvailable * _recAudioFrameSize);
+        }
+        assert(syncBufferSize >= (syncBufIndex * _recAudioFrameSize) +
+                                     framesAvailable * _recAudioFrameSize);
+
+        // Release the capture buffer
+        //
+        hr = _ptrCaptureClient->ReleaseBuffer(framesAvailable);
+        EXIT_ON_ERROR(hr);
+
+        _readSamples += framesAvailable;
+        syncBufIndex += framesAvailable;
+
+        QueryPerformanceCounter(&t1);
+
+        // Get the current recording and playout delay.
+        uint32_t sndCardRecDelay = (uint32_t)(
+            ((((UINT64)t1.QuadPart * _perfCounterFactor) - recTime) / 10000) +
+            (10 * syncBufIndex) / _recBlockSize - 10);
+        uint32_t sndCardPlayDelay = static_cast<uint32_t>(_sndCardPlayDelay);
+
+        _sndCardRecDelay = sndCardRecDelay;
+
+        while (syncBufIndex >= _recBlockSize) {
+          if (_ptrAudioBuffer) {
+            _ptrAudioBuffer->SetRecordedBuffer((const int8_t*)syncBuffer,
+                                               _recBlockSize);
+            _ptrAudioBuffer->SetVQEData(sndCardPlayDelay, sndCardRecDelay, 0);
+
+            _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+
+            _UnLock();  // release lock while making the callback
+            _ptrAudioBuffer->DeliverRecordedData();
+            _Lock();  // restore the lock
 
             // Sanity check to ensure that essential states are not modified
-            // during the unlocked period.
-            if (_ptrCaptureClient == NULL || _ptrClientIn == NULL)
-            {
-                _UnLock();
-                LOG(LS_ERROR)
-                    << "input state has been modified during unlocked period";
-                goto Exit;
+            // during the unlocked period
+            if (_ptrCaptureClient == NULL || _ptrClientIn == NULL) {
+              _UnLock();
+              LOG(LS_ERROR) << "input state has been modified during"
+                            << " unlocked period";
+              goto Exit;
             }
+          }
 
-            //  Find out how much capture data is available
-            //
-            hr = _ptrCaptureClient->GetBuffer(&pData,           // packet which is ready to be read by used
-                                              &framesAvailable, // #frames in the captured packet (can be zero)
-                                              &flags,           // support flags (check)
-                                              &recPos,          // device position of first audio frame in data packet
-                                              &recTime);        // value of performance counter at the time of recording the first audio frame
-
-            if (SUCCEEDED(hr))
-            {
-                if (AUDCLNT_S_BUFFER_EMPTY == hr)
-                {
-                    // Buffer was empty => start waiting for a new capture notification event
-                    _UnLock();
-                    break;
-                }
-
-                if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
-                {
-                    // Treat all of the data in the packet as silence and ignore the actual data values.
-                    LOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_SILENT";
-                    pData = NULL;
-                }
-
-                assert(framesAvailable != 0);
-
-                if (pData)
-                {
-                    CopyMemory(&syncBuffer[syncBufIndex*_recAudioFrameSize], pData, framesAvailable*_recAudioFrameSize);
-                }
-                else
-                {
-                    ZeroMemory(&syncBuffer[syncBufIndex*_recAudioFrameSize], framesAvailable*_recAudioFrameSize);
-                }
-                assert(syncBufferSize >= (syncBufIndex*_recAudioFrameSize)+framesAvailable*_recAudioFrameSize);
-
-                // Release the capture buffer
-                //
-                hr = _ptrCaptureClient->ReleaseBuffer(framesAvailable);
-                EXIT_ON_ERROR(hr);
-
-                _readSamples += framesAvailable;
-                syncBufIndex += framesAvailable;
-
-                QueryPerformanceCounter(&t1);
-
-                // Get the current recording and playout delay.
-                uint32_t sndCardRecDelay = (uint32_t)
-                    (((((UINT64)t1.QuadPart * _perfCounterFactor) - recTime)
-                        / 10000) + (10*syncBufIndex) / _recBlockSize - 10);
-                uint32_t sndCardPlayDelay =
-                    static_cast<uint32_t>(_sndCardPlayDelay);
-
-                _sndCardRecDelay = sndCardRecDelay;
-
-                while (syncBufIndex >= _recBlockSize)
-                {
-                    if (_ptrAudioBuffer)
-                    {
-                        _ptrAudioBuffer->SetRecordedBuffer((const int8_t*)syncBuffer, _recBlockSize);
-                        _ptrAudioBuffer->SetVQEData(sndCardPlayDelay,
-                                                    sndCardRecDelay,
-                                                    0);
-
-                        _ptrAudioBuffer->SetTypingStatus(KeyPressed());
-
-                        _UnLock();  // release lock while making the callback
-                        _ptrAudioBuffer->DeliverRecordedData();
-                        _Lock();    // restore the lock
-
-                        // Sanity check to ensure that essential states are not modified during the unlocked period
-                        if (_ptrCaptureClient == NULL || _ptrClientIn == NULL)
-                        {
-                            _UnLock();
-                            LOG(LS_ERROR)
-                                << "input state has been modified during"
-                                << " unlocked period";
-                            goto Exit;
-                        }
-                    }
-
-                    // store remaining data which was not able to deliver as 10ms segment
-                    MoveMemory(&syncBuffer[0], &syncBuffer[_recBlockSize*_recAudioFrameSize], (syncBufIndex-_recBlockSize)*_recAudioFrameSize);
-                    syncBufIndex -= _recBlockSize;
-                    sndCardRecDelay -= 10;
-                }
-
-                if (_AGC)
-                {
-                    uint32_t newMicLevel = _ptrAudioBuffer->NewMicLevel();
-                    if (newMicLevel != 0)
-                    {
-                        // The VQE will only deliver non-zero microphone levels when a change is needed.
-                        // Set this new mic level (received from the observer as return value in the callback).
-                        LOG(LS_VERBOSE) << "AGC change of volume: new="
-                                        << newMicLevel;
-                        // We store this outside of the audio buffer to avoid
-                        // having it overwritten by the getter thread.
-                        _newMicLevel = newMicLevel;
-                        SetEvent(_hSetCaptureVolumeEvent);
-                    }
-                }
-            }
-            else
-            {
-                // If GetBuffer returns AUDCLNT_E_BUFFER_ERROR, the thread consuming the audio samples
-                // must wait for the next processing pass. The client might benefit from keeping a count
-                // of the failed GetBuffer calls. If GetBuffer returns this error repeatedly, the client
-                // can start a new processing loop after shutting down the current client by calling
-                // IAudioClient::Stop, IAudioClient::Reset, and releasing the audio client.
-                LOG(LS_ERROR) << "IAudioCaptureClient::GetBuffer returned"
-                              << " AUDCLNT_E_BUFFER_ERROR, hr = 0x"
-                              << std::hex << hr << std::dec;
-                goto Exit;
-            }
-
-            _UnLock();
+          // store remaining data which was not able to deliver as 10ms segment
+          MoveMemory(&syncBuffer[0],
+                     &syncBuffer[_recBlockSize * _recAudioFrameSize],
+                     (syncBufIndex - _recBlockSize) * _recAudioFrameSize);
+          syncBufIndex -= _recBlockSize;
+          sndCardRecDelay -= 10;
         }
-    }
 
-    // ---------------------------- THREAD LOOP ---------------------------- <<
+        if (_AGC) {
+          uint32_t newMicLevel = _ptrAudioBuffer->NewMicLevel();
+          if (newMicLevel != 0) {
+            // The VQE will only deliver non-zero microphone levels when a
+            // change is needed. Set this new mic level (received from the
+            // observer as return value in the callback).
+            LOG(LS_VERBOSE) << "AGC change of volume: new=" << newMicLevel;
+            // We store this outside of the audio buffer to avoid
+            // having it overwritten by the getter thread.
+            _newMicLevel = newMicLevel;
+            SetEvent(_hSetCaptureVolumeEvent);
+          }
+        }
+      } else {
+        // If GetBuffer returns AUDCLNT_E_BUFFER_ERROR, the thread consuming the
+        // audio samples must wait for the next processing pass. The client
+        // might benefit from keeping a count of the failed GetBuffer calls. If
+        // GetBuffer returns this error repeatedly, the client can start a new
+        // processing loop after shutting down the current client by calling
+        // IAudioClient::Stop, IAudioClient::Reset, and releasing the audio
+        // client.
+        LOG(LS_ERROR) << "IAudioCaptureClient::GetBuffer returned"
+                      << " AUDCLNT_E_BUFFER_ERROR, hr = 0x" << std::hex << hr
+                      << std::dec;
+        goto Exit;
+      }
 
-    if (_ptrClientIn)
-    {
-        hr = _ptrClientIn->Stop();
+      _UnLock();
     }
+  }
+
+  // ---------------------------- THREAD LOOP ---------------------------- <<
+
+  if (_ptrClientIn) {
+    hr = _ptrClientIn->Stop();
+  }
 
 Exit:
-    if (FAILED(hr))
-    {
-        _ptrClientIn->Stop();
-        _UnLock();
-        _TraceCOMError(hr);
-    }
-
-    RevertCaptureThreadPriority();
-
-    _Lock();
-
-    if (keepRecording)
-    {
-        if (_ptrClientIn != NULL)
-        {
-            hr = _ptrClientIn->Stop();
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-            }
-            hr = _ptrClientIn->Reset();
-            if (FAILED(hr))
-            {
-                _TraceCOMError(hr);
-            }
-        }
-
-        LOG(LS_ERROR)
-            << "Recording error: capturing thread has ended pre-maturely";
-    }
-    else
-    {
-        LOG(LS_VERBOSE) << "_Capturing thread is now terminated properly";
-    }
-
-    SAFE_RELEASE(_ptrClientIn);
-    SAFE_RELEASE(_ptrCaptureClient);
-
+  if (FAILED(hr)) {
+    _ptrClientIn->Stop();
     _UnLock();
+    _TraceCOMError(hr);
+  }
 
-    if (syncBuffer)
-    {
-        delete [] syncBuffer;
+  RevertCaptureThreadPriority();
+
+  _Lock();
+
+  if (keepRecording) {
+    if (_ptrClientIn != NULL) {
+      hr = _ptrClientIn->Stop();
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+      }
+      hr = _ptrClientIn->Reset();
+      if (FAILED(hr)) {
+        _TraceCOMError(hr);
+      }
     }
 
-    return (DWORD)hr;
+    LOG(LS_ERROR) << "Recording error: capturing thread has ended pre-maturely";
+  } else {
+    LOG(LS_VERBOSE) << "_Capturing thread is now terminated properly";
+  }
+
+  SAFE_RELEASE(_ptrClientIn);
+  SAFE_RELEASE(_ptrCaptureClient);
+
+  _UnLock();
+
+  if (syncBuffer) {
+    delete[] syncBuffer;
+  }
+
+  return (DWORD)hr;
 }
 
-int32_t AudioDeviceWindowsCore::EnableBuiltInAEC(bool enable)
-{
+int32_t AudioDeviceWindowsCore::EnableBuiltInAEC(bool enable) {
+  if (_recIsInitialized) {
+    LOG(LS_ERROR)
+        << "Attempt to set Windows AEC with recording already initialized";
+    return -1;
+  }
 
-    if (_recIsInitialized)
-    {
-        LOG(LS_ERROR)
-            << "Attempt to set Windows AEC with recording already initialized";
-        return -1;
-    }
+  if (_dmo == NULL) {
+    LOG(LS_ERROR)
+        << "Built-in AEC DMO was not initialized properly at create time";
+    return -1;
+  }
 
-    if (_dmo == NULL)
-    {
-        LOG(LS_ERROR)
-            << "Built-in AEC DMO was not initialized properly at create time";
-        return -1;
-    }
-
-    _builtInAecEnabled = enable;
-    return 0;
+  _builtInAecEnabled = enable;
+  return 0;
 }
 
-int AudioDeviceWindowsCore::SetDMOProperties()
-{
-    HRESULT hr = S_OK;
-    assert(_dmo != NULL);
+int AudioDeviceWindowsCore::SetDMOProperties() {
+  HRESULT hr = S_OK;
+  assert(_dmo != NULL);
 
-    rtc::scoped_refptr<IPropertyStore> ps;
-    {
-        IPropertyStore* ptrPS = NULL;
-        hr = _dmo->QueryInterface(IID_IPropertyStore,
-                                  reinterpret_cast<void**>(&ptrPS));
-        if (FAILED(hr) || ptrPS == NULL)
-        {
-            _TraceCOMError(hr);
-            return -1;
-        }
-        ps = ptrPS;
-        SAFE_RELEASE(ptrPS);
+  rtc::scoped_refptr<IPropertyStore> ps;
+  {
+    IPropertyStore* ptrPS = NULL;
+    hr = _dmo->QueryInterface(IID_IPropertyStore,
+                              reinterpret_cast<void**>(&ptrPS));
+    if (FAILED(hr) || ptrPS == NULL) {
+      _TraceCOMError(hr);
+      return -1;
+    }
+    ps = ptrPS;
+    SAFE_RELEASE(ptrPS);
+  }
+
+  // Set the AEC system mode.
+  // SINGLE_CHANNEL_AEC - AEC processing only.
+  if (SetVtI4Property(ps, MFPKEY_WMAAECMA_SYSTEM_MODE, SINGLE_CHANNEL_AEC)) {
+    return -1;
+  }
+
+  // Set the AEC source mode.
+  // VARIANT_TRUE - Source mode (we poll the AEC for captured data).
+  if (SetBoolProperty(ps, MFPKEY_WMAAECMA_DMO_SOURCE_MODE, VARIANT_TRUE) ==
+      -1) {
+    return -1;
+  }
+
+  // Enable the feature mode.
+  // This lets us override all the default processing settings below.
+  if (SetBoolProperty(ps, MFPKEY_WMAAECMA_FEATURE_MODE, VARIANT_TRUE) == -1) {
+    return -1;
+  }
+
+  // Disable analog AGC (default enabled).
+  if (SetBoolProperty(ps, MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER, VARIANT_FALSE) ==
+      -1) {
+    return -1;
+  }
+
+  // Disable noise suppression (default enabled).
+  // 0 - Disabled, 1 - Enabled
+  if (SetVtI4Property(ps, MFPKEY_WMAAECMA_FEATR_NS, 0) == -1) {
+    return -1;
+  }
+
+  // Relevant parameters to leave at default settings:
+  // MFPKEY_WMAAECMA_FEATR_AGC - Digital AGC (disabled).
+  // MFPKEY_WMAAECMA_FEATR_CENTER_CLIP - AEC center clipping (enabled).
+  // MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH - Filter length (256 ms).
+  //   TODO(andrew): investigate decresing the length to 128 ms.
+  // MFPKEY_WMAAECMA_FEATR_FRAME_SIZE - Frame size (0).
+  //   0 is automatic; defaults to 160 samples (or 10 ms frames at the
+  //   selected 16 kHz) as long as mic array processing is disabled.
+  // MFPKEY_WMAAECMA_FEATR_NOISE_FILL - Comfort noise (enabled).
+  // MFPKEY_WMAAECMA_FEATR_VAD - VAD (disabled).
+
+  // Set the devices selected by VoE. If using a default device, we need to
+  // search for the device index.
+  int inDevIndex = _inputDeviceIndex;
+  int outDevIndex = _outputDeviceIndex;
+  if (!_usingInputDeviceIndex) {
+    ERole role = eCommunications;
+    if (_inputDevice == AudioDeviceModule::kDefaultDevice) {
+      role = eConsole;
     }
 
-    // Set the AEC system mode.
-    // SINGLE_CHANNEL_AEC - AEC processing only.
-    if (SetVtI4Property(ps,
-                        MFPKEY_WMAAECMA_SYSTEM_MODE,
-                        SINGLE_CHANNEL_AEC))
-    {
-        return -1;
+    if (_GetDefaultDeviceIndex(eCapture, role, &inDevIndex) == -1) {
+      return -1;
+    }
+  }
+
+  if (!_usingOutputDeviceIndex) {
+    ERole role = eCommunications;
+    if (_outputDevice == AudioDeviceModule::kDefaultDevice) {
+      role = eConsole;
     }
 
-    // Set the AEC source mode.
-    // VARIANT_TRUE - Source mode (we poll the AEC for captured data).
-    if (SetBoolProperty(ps,
-                        MFPKEY_WMAAECMA_DMO_SOURCE_MODE,
-                        VARIANT_TRUE) == -1)
-    {
-        return -1;
+    if (_GetDefaultDeviceIndex(eRender, role, &outDevIndex) == -1) {
+      return -1;
     }
+  }
 
-    // Enable the feature mode.
-    // This lets us override all the default processing settings below.
-    if (SetBoolProperty(ps,
-                        MFPKEY_WMAAECMA_FEATURE_MODE,
-                        VARIANT_TRUE) == -1)
-    {
-        return -1;
-    }
+  DWORD devIndex = static_cast<uint32_t>(outDevIndex << 16) +
+                   static_cast<uint32_t>(0x0000ffff & inDevIndex);
+  LOG(LS_VERBOSE) << "Capture device index: " << inDevIndex
+                  << ", render device index: " << outDevIndex;
+  if (SetVtI4Property(ps, MFPKEY_WMAAECMA_DEVICE_INDEXES, devIndex) == -1) {
+    return -1;
+  }
 
-    // Disable analog AGC (default enabled).
-    if (SetBoolProperty(ps,
-                        MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER,
-                        VARIANT_FALSE) == -1)
-    {
-        return -1;
-    }
-
-    // Disable noise suppression (default enabled).
-    // 0 - Disabled, 1 - Enabled
-    if (SetVtI4Property(ps,
-                        MFPKEY_WMAAECMA_FEATR_NS,
-                        0) == -1)
-    {
-        return -1;
-    }
-
-    // Relevant parameters to leave at default settings:
-    // MFPKEY_WMAAECMA_FEATR_AGC - Digital AGC (disabled).
-    // MFPKEY_WMAAECMA_FEATR_CENTER_CLIP - AEC center clipping (enabled).
-    // MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH - Filter length (256 ms).
-    //   TODO(andrew): investigate decresing the length to 128 ms.
-    // MFPKEY_WMAAECMA_FEATR_FRAME_SIZE - Frame size (0).
-    //   0 is automatic; defaults to 160 samples (or 10 ms frames at the
-    //   selected 16 kHz) as long as mic array processing is disabled.
-    // MFPKEY_WMAAECMA_FEATR_NOISE_FILL - Comfort noise (enabled).
-    // MFPKEY_WMAAECMA_FEATR_VAD - VAD (disabled).
-
-    // Set the devices selected by VoE. If using a default device, we need to
-    // search for the device index.
-    int inDevIndex = _inputDeviceIndex;
-    int outDevIndex = _outputDeviceIndex;
-    if (!_usingInputDeviceIndex)
-    {
-        ERole role = eCommunications;
-        if (_inputDevice == AudioDeviceModule::kDefaultDevice)
-        {
-            role = eConsole;
-        }
-
-        if (_GetDefaultDeviceIndex(eCapture, role, &inDevIndex) == -1)
-        {
-            return -1;
-        }
-    }
-
-    if (!_usingOutputDeviceIndex)
-    {
-        ERole role = eCommunications;
-        if (_outputDevice == AudioDeviceModule::kDefaultDevice)
-        {
-            role = eConsole;
-        }
-
-        if (_GetDefaultDeviceIndex(eRender, role, &outDevIndex) == -1)
-        {
-            return -1;
-        }
-    }
-
-    DWORD devIndex = static_cast<uint32_t>(outDevIndex << 16) +
-                     static_cast<uint32_t>(0x0000ffff & inDevIndex);
-    LOG(LS_VERBOSE) << "Capture device index: " << inDevIndex
-                    << ", render device index: " << outDevIndex;
-    if (SetVtI4Property(ps,
-                        MFPKEY_WMAAECMA_DEVICE_INDEXES,
-                        devIndex) == -1)
-    {
-        return -1;
-    }
-
-    return 0;
+  return 0;
 }
 
 int AudioDeviceWindowsCore::SetBoolProperty(IPropertyStore* ptrPS,
                                             REFPROPERTYKEY key,
-                                            VARIANT_BOOL value)
-{
-    PROPVARIANT pv;
-    PropVariantInit(&pv);
-    pv.vt = VT_BOOL;
-    pv.boolVal = value;
-    HRESULT hr = ptrPS->SetValue(key, pv);
-    PropVariantClear(&pv);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        return -1;
-    }
-    return 0;
+                                            VARIANT_BOOL value) {
+  PROPVARIANT pv;
+  PropVariantInit(&pv);
+  pv.vt = VT_BOOL;
+  pv.boolVal = value;
+  HRESULT hr = ptrPS->SetValue(key, pv);
+  PropVariantClear(&pv);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
+  return 0;
 }
 
 int AudioDeviceWindowsCore::SetVtI4Property(IPropertyStore* ptrPS,
                                             REFPROPERTYKEY key,
-                                            LONG value)
-{
-    PROPVARIANT pv;
-    PropVariantInit(&pv);
-    pv.vt = VT_I4;
-    pv.lVal = value;
-    HRESULT hr = ptrPS->SetValue(key, pv);
-    PropVariantClear(&pv);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        return -1;
-    }
-    return 0;
+                                            LONG value) {
+  PROPVARIANT pv;
+  PropVariantInit(&pv);
+  pv.vt = VT_I4;
+  pv.lVal = value;
+  HRESULT hr = ptrPS->SetValue(key, pv);
+  PropVariantClear(&pv);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -4121,40 +3664,33 @@
 //  such devices.
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_RefreshDeviceList(EDataFlow dir)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_RefreshDeviceList(EDataFlow dir) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    IMMDeviceCollection *pCollection = NULL;
+  HRESULT hr = S_OK;
+  IMMDeviceCollection* pCollection = NULL;
 
-    assert(dir == eRender || dir == eCapture);
-    assert(_ptrEnumerator != NULL);
+  assert(dir == eRender || dir == eCapture);
+  assert(_ptrEnumerator != NULL);
 
-    // Create a fresh list of devices using the specified direction
-    hr = _ptrEnumerator->EnumAudioEndpoints(
-                           dir,
-                           DEVICE_STATE_ACTIVE,
-                           &pCollection);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pCollection);
-        return -1;
-    }
+  // Create a fresh list of devices using the specified direction
+  hr = _ptrEnumerator->EnumAudioEndpoints(dir, DEVICE_STATE_ACTIVE,
+                                          &pCollection);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    SAFE_RELEASE(pCollection);
+    return -1;
+  }
 
-    if (dir == eRender)
-    {
-        SAFE_RELEASE(_ptrRenderCollection);
-        _ptrRenderCollection = pCollection;
-    }
-    else
-    {
-        SAFE_RELEASE(_ptrCaptureCollection);
-        _ptrCaptureCollection = pCollection;
-    }
+  if (dir == eRender) {
+    SAFE_RELEASE(_ptrRenderCollection);
+    _ptrRenderCollection = pCollection;
+  } else {
+    SAFE_RELEASE(_ptrCaptureCollection);
+    _ptrCaptureCollection = pCollection;
+  }
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -4164,31 +3700,26 @@
 //  current list of such devices.
 // ----------------------------------------------------------------------------
 
-int16_t AudioDeviceWindowsCore::_DeviceListCount(EDataFlow dir)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int16_t AudioDeviceWindowsCore::_DeviceListCount(EDataFlow dir) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    UINT count = 0;
+  HRESULT hr = S_OK;
+  UINT count = 0;
 
-    assert(eRender == dir || eCapture == dir);
+  assert(eRender == dir || eCapture == dir);
 
-    if (eRender == dir && NULL != _ptrRenderCollection)
-    {
-        hr = _ptrRenderCollection->GetCount(&count);
-    }
-    else if (NULL != _ptrCaptureCollection)
-    {
-        hr = _ptrCaptureCollection->GetCount(&count);
-    }
+  if (eRender == dir && NULL != _ptrRenderCollection) {
+    hr = _ptrRenderCollection->GetCount(&count);
+  } else if (NULL != _ptrCaptureCollection) {
+    hr = _ptrCaptureCollection->GetCount(&count);
+  }
 
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        return -1;
-    }
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
 
-    return static_cast<int16_t> (count);
+  return static_cast<int16_t>(count);
 }
 
 // ----------------------------------------------------------------------------
@@ -4202,34 +3733,32 @@
 //  in _RefreshDeviceList().
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetListDeviceName(EDataFlow dir, int index, LPWSTR szBuffer, int bufferLen)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_GetListDeviceName(EDataFlow dir,
+                                                   int index,
+                                                   LPWSTR szBuffer,
+                                                   int bufferLen) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    IMMDevice *pDevice = NULL;
+  HRESULT hr = S_OK;
+  IMMDevice* pDevice = NULL;
 
-    assert(dir == eRender || dir == eCapture);
+  assert(dir == eRender || dir == eCapture);
 
-    if (eRender == dir && NULL != _ptrRenderCollection)
-    {
-        hr = _ptrRenderCollection->Item(index, &pDevice);
-    }
-    else if (NULL != _ptrCaptureCollection)
-    {
-        hr = _ptrCaptureCollection->Item(index, &pDevice);
-    }
+  if (eRender == dir && NULL != _ptrRenderCollection) {
+    hr = _ptrRenderCollection->Item(index, &pDevice);
+  } else if (NULL != _ptrCaptureCollection) {
+    hr = _ptrCaptureCollection->Item(index, &pDevice);
+  }
 
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pDevice);
-        return -1;
-    }
-
-    int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(pDevice);
-    return res;
+    return -1;
+  }
+
+  int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
+  SAFE_RELEASE(pDevice);
+  return res;
 }
 
 // ----------------------------------------------------------------------------
@@ -4241,32 +3770,30 @@
 //  Uses: _ptrEnumerator
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetDefaultDeviceName(EDataFlow dir, ERole role, LPWSTR szBuffer, int bufferLen)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_GetDefaultDeviceName(EDataFlow dir,
+                                                      ERole role,
+                                                      LPWSTR szBuffer,
+                                                      int bufferLen) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    IMMDevice *pDevice = NULL;
+  HRESULT hr = S_OK;
+  IMMDevice* pDevice = NULL;
 
-    assert(dir == eRender || dir == eCapture);
-    assert(role == eConsole || role == eCommunications);
-    assert(_ptrEnumerator != NULL);
+  assert(dir == eRender || dir == eCapture);
+  assert(role == eConsole || role == eCommunications);
+  assert(_ptrEnumerator != NULL);
 
-    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
-                           dir,
-                           role,
-                           &pDevice);
+  hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice);
 
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pDevice);
-        return -1;
-    }
-
-    int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(pDevice);
-    return res;
+    return -1;
+  }
+
+  int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
+  SAFE_RELEASE(pDevice);
+  return res;
 }
 
 // ----------------------------------------------------------------------------
@@ -4280,34 +3807,32 @@
 //  in _RefreshDeviceList().
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetListDeviceID(EDataFlow dir, int index, LPWSTR szBuffer, int bufferLen)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_GetListDeviceID(EDataFlow dir,
+                                                 int index,
+                                                 LPWSTR szBuffer,
+                                                 int bufferLen) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    IMMDevice *pDevice = NULL;
+  HRESULT hr = S_OK;
+  IMMDevice* pDevice = NULL;
 
-    assert(dir == eRender || dir == eCapture);
+  assert(dir == eRender || dir == eCapture);
 
-    if (eRender == dir && NULL != _ptrRenderCollection)
-    {
-        hr = _ptrRenderCollection->Item(index, &pDevice);
-    }
-    else if (NULL != _ptrCaptureCollection)
-    {
-        hr = _ptrCaptureCollection->Item(index, &pDevice);
-    }
+  if (eRender == dir && NULL != _ptrRenderCollection) {
+    hr = _ptrRenderCollection->Item(index, &pDevice);
+  } else if (NULL != _ptrCaptureCollection) {
+    hr = _ptrCaptureCollection->Item(index, &pDevice);
+  }
 
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pDevice);
-        return -1;
-    }
-
-    int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(pDevice);
-    return res;
+    return -1;
+  }
+
+  int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
+  SAFE_RELEASE(pDevice);
+  return res;
 }
 
 // ----------------------------------------------------------------------------
@@ -4319,114 +3844,99 @@
 //  Uses: _ptrEnumerator
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetDefaultDeviceID(EDataFlow dir, ERole role, LPWSTR szBuffer, int bufferLen)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_GetDefaultDeviceID(EDataFlow dir,
+                                                    ERole role,
+                                                    LPWSTR szBuffer,
+                                                    int bufferLen) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    IMMDevice *pDevice = NULL;
+  HRESULT hr = S_OK;
+  IMMDevice* pDevice = NULL;
 
-    assert(dir == eRender || dir == eCapture);
-    assert(role == eConsole || role == eCommunications);
-    assert(_ptrEnumerator != NULL);
+  assert(dir == eRender || dir == eCapture);
+  assert(role == eConsole || role == eCommunications);
+  assert(_ptrEnumerator != NULL);
 
-    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
-                           dir,
-                           role,
-                           &pDevice);
+  hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice);
 
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pDevice);
-        return -1;
-    }
-
-    int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
     SAFE_RELEASE(pDevice);
-    return res;
+    return -1;
+  }
+
+  int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
+  SAFE_RELEASE(pDevice);
+  return res;
 }
 
 int32_t AudioDeviceWindowsCore::_GetDefaultDeviceIndex(EDataFlow dir,
                                                        ERole role,
-                                                       int* index)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+                                                       int* index) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr = S_OK;
-    WCHAR szDefaultDeviceID[MAX_PATH] = {0};
-    WCHAR szDeviceID[MAX_PATH] = {0};
+  HRESULT hr = S_OK;
+  WCHAR szDefaultDeviceID[MAX_PATH] = {0};
+  WCHAR szDeviceID[MAX_PATH] = {0};
 
-    const size_t kDeviceIDLength = sizeof(szDeviceID)/sizeof(szDeviceID[0]);
-    assert(kDeviceIDLength ==
-        sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0]));
+  const size_t kDeviceIDLength = sizeof(szDeviceID) / sizeof(szDeviceID[0]);
+  assert(kDeviceIDLength ==
+         sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0]));
 
-    if (_GetDefaultDeviceID(dir,
-                            role,
-                            szDefaultDeviceID,
-                            kDeviceIDLength) == -1)
+  if (_GetDefaultDeviceID(dir, role, szDefaultDeviceID, kDeviceIDLength) ==
+      -1) {
+    return -1;
+  }
+
+  IMMDeviceCollection* collection = _ptrCaptureCollection;
+  if (dir == eRender) {
+    collection = _ptrRenderCollection;
+  }
+
+  if (!collection) {
+    LOG(LS_ERROR) << "Device collection not valid";
+    return -1;
+  }
+
+  UINT count = 0;
+  hr = collection->GetCount(&count);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
+
+  *index = -1;
+  for (UINT i = 0; i < count; i++) {
+    memset(szDeviceID, 0, sizeof(szDeviceID));
+    rtc::scoped_refptr<IMMDevice> device;
     {
-        return -1;
-    }
-
-    IMMDeviceCollection* collection = _ptrCaptureCollection;
-    if (dir == eRender)
-    {
-        collection = _ptrRenderCollection;
-    }
-
-    if (!collection)
-    {
-        LOG(LS_ERROR) << "Device collection not valid";
-        return -1;
-    }
-
-    UINT count = 0;
-    hr = collection->GetCount(&count);
-    if (FAILED(hr))
-    {
+      IMMDevice* ptrDevice = NULL;
+      hr = collection->Item(i, &ptrDevice);
+      if (FAILED(hr) || ptrDevice == NULL) {
         _TraceCOMError(hr);
         return -1;
+      }
+      device = ptrDevice;
+      SAFE_RELEASE(ptrDevice);
     }
 
-    *index = -1;
-    for (UINT i = 0; i < count; i++)
-    {
-        memset(szDeviceID, 0, sizeof(szDeviceID));
-        rtc::scoped_refptr<IMMDevice> device;
-        {
-            IMMDevice* ptrDevice = NULL;
-            hr = collection->Item(i, &ptrDevice);
-            if (FAILED(hr) || ptrDevice == NULL)
-            {
-                _TraceCOMError(hr);
-                return -1;
-            }
-            device = ptrDevice;
-            SAFE_RELEASE(ptrDevice);
-        }
-
-        if (_GetDeviceID(device, szDeviceID, kDeviceIDLength) == -1)
-        {
-           return -1;
-        }
-
-        if (wcsncmp(szDefaultDeviceID, szDeviceID, kDeviceIDLength) == 0)
-        {
-            // Found a match.
-            *index = i;
-            break;
-        }
-
+    if (_GetDeviceID(device, szDeviceID, kDeviceIDLength) == -1) {
+      return -1;
     }
 
-    if (*index == -1)
-    {
-        LOG(LS_ERROR) << "Unable to find collection index for default device";
-        return -1;
+    if (wcsncmp(szDefaultDeviceID, szDeviceID, kDeviceIDLength) == 0) {
+      // Found a match.
+      *index = i;
+      break;
     }
+  }
 
-    return 0;
+  if (*index == -1) {
+    LOG(LS_ERROR) << "Unable to find collection index for default device";
+    return -1;
+  }
+
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -4435,401 +3945,363 @@
 
 int32_t AudioDeviceWindowsCore::_GetDeviceName(IMMDevice* pDevice,
                                                LPWSTR pszBuffer,
-                                               int bufferLen)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+                                               int bufferLen) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    static const WCHAR szDefault[] = L"<Device not available>";
+  static const WCHAR szDefault[] = L"<Device not available>";
 
-    HRESULT hr = E_FAIL;
-    IPropertyStore *pProps = NULL;
-    PROPVARIANT varName;
+  HRESULT hr = E_FAIL;
+  IPropertyStore* pProps = NULL;
+  PROPVARIANT varName;
 
-    assert(pszBuffer != NULL);
-    assert(bufferLen > 0);
+  assert(pszBuffer != NULL);
+  assert(bufferLen > 0);
 
-    if (pDevice != NULL)
-    {
-        hr = pDevice->OpenPropertyStore(STGM_READ, &pProps);
-        if (FAILED(hr))
-        {
-            LOG(LS_ERROR) << "IMMDevice::OpenPropertyStore failed, hr = 0x"
-                          << std::hex << hr << std::dec;
-        }
+  if (pDevice != NULL) {
+    hr = pDevice->OpenPropertyStore(STGM_READ, &pProps);
+    if (FAILED(hr)) {
+      LOG(LS_ERROR) << "IMMDevice::OpenPropertyStore failed, hr = 0x"
+                    << std::hex << hr << std::dec;
     }
+  }
 
-    // Initialize container for property value.
-    PropVariantInit(&varName);
+  // Initialize container for property value.
+  PropVariantInit(&varName);
 
-    if (SUCCEEDED(hr))
-    {
-        // Get the endpoint device's friendly-name property.
-        hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
-        if (FAILED(hr))
-        {
-            LOG(LS_ERROR) << "IPropertyStore::GetValue failed, hr = 0x"
-                          << std::hex << hr << std::dec;
-        }
+  if (SUCCEEDED(hr)) {
+    // Get the endpoint device's friendly-name property.
+    hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
+    if (FAILED(hr)) {
+      LOG(LS_ERROR) << "IPropertyStore::GetValue failed, hr = 0x" << std::hex
+                    << hr << std::dec;
     }
+  }
 
-    if ((SUCCEEDED(hr)) && (VT_EMPTY == varName.vt))
-    {
-        hr = E_FAIL;
-            LOG(LS_ERROR) << "IPropertyStore::GetValue returned no value,"
-                          << " hr = 0x" << std::hex << hr << std::dec;
-    }
+  if ((SUCCEEDED(hr)) && (VT_EMPTY == varName.vt)) {
+    hr = E_FAIL;
+    LOG(LS_ERROR) << "IPropertyStore::GetValue returned no value,"
+                  << " hr = 0x" << std::hex << hr << std::dec;
+  }
 
-    if ((SUCCEEDED(hr)) && (VT_LPWSTR != varName.vt))
-    {
-        // The returned value is not a wide null terminated string.
-        hr = E_UNEXPECTED;
-            LOG(LS_ERROR) << "IPropertyStore::GetValue returned unexpected"
-                          << " type, hr = 0x" << std::hex << hr << std::dec;
-    }
+  if ((SUCCEEDED(hr)) && (VT_LPWSTR != varName.vt)) {
+    // The returned value is not a wide null terminated string.
+    hr = E_UNEXPECTED;
+    LOG(LS_ERROR) << "IPropertyStore::GetValue returned unexpected"
+                  << " type, hr = 0x" << std::hex << hr << std::dec;
+  }
 
-    if (SUCCEEDED(hr) && (varName.pwszVal != NULL))
-    {
-        // Copy the valid device name to the provided ouput buffer.
-        wcsncpy_s(pszBuffer, bufferLen, varName.pwszVal, _TRUNCATE);
-    }
-    else
-    {
-        // Failed to find the device name.
-        wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
-    }
+  if (SUCCEEDED(hr) && (varName.pwszVal != NULL)) {
+    // Copy the valid device name to the provided output buffer.
+    wcsncpy_s(pszBuffer, bufferLen, varName.pwszVal, _TRUNCATE);
+  } else {
+    // Failed to find the device name.
+    wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
+  }
 
-    PropVariantClear(&varName);
-    SAFE_RELEASE(pProps);
+  PropVariantClear(&varName);
+  SAFE_RELEASE(pProps);
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  _GetDeviceID
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetDeviceID(IMMDevice* pDevice, LPWSTR pszBuffer, int bufferLen)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_GetDeviceID(IMMDevice* pDevice,
+                                             LPWSTR pszBuffer,
+                                             int bufferLen) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    static const WCHAR szDefault[] = L"<Device not available>";
+  static const WCHAR szDefault[] = L"<Device not available>";
 
-    HRESULT hr = E_FAIL;
-    LPWSTR pwszID = NULL;
+  HRESULT hr = E_FAIL;
+  LPWSTR pwszID = NULL;
 
-    assert(pszBuffer != NULL);
-    assert(bufferLen > 0);
+  assert(pszBuffer != NULL);
+  assert(bufferLen > 0);
 
-    if (pDevice != NULL)
-    {
-        hr = pDevice->GetId(&pwszID);
-    }
+  if (pDevice != NULL) {
+    hr = pDevice->GetId(&pwszID);
+  }
 
-    if (hr == S_OK)
-    {
-        // Found the device ID.
-        wcsncpy_s(pszBuffer, bufferLen, pwszID, _TRUNCATE);
-    }
-    else
-    {
-        // Failed to find the device ID.
-        wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
-    }
+  if (hr == S_OK) {
+    // Found the device ID.
+    wcsncpy_s(pszBuffer, bufferLen, pwszID, _TRUNCATE);
+  } else {
+    // Failed to find the device ID.
+    wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
+  }
 
-    CoTaskMemFree(pwszID);
-    return 0;
+  CoTaskMemFree(pwszID);
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  _GetDefaultDevice
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetDefaultDevice(EDataFlow dir, ERole role, IMMDevice** ppDevice)
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_GetDefaultDevice(EDataFlow dir,
+                                                  ERole role,
+                                                  IMMDevice** ppDevice) {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    HRESULT hr(S_OK);
+  HRESULT hr(S_OK);
 
-    assert(_ptrEnumerator != NULL);
+  assert(_ptrEnumerator != NULL);
 
-    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
-                                   dir,
-                                   role,
-                                   ppDevice);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        return -1;
-    }
+  hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, ppDevice);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    return -1;
+  }
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  _GetListDevice
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_GetListDevice(EDataFlow dir, int index, IMMDevice** ppDevice)
-{
-    HRESULT hr(S_OK);
+int32_t AudioDeviceWindowsCore::_GetListDevice(EDataFlow dir,
+                                               int index,
+                                               IMMDevice** ppDevice) {
+  HRESULT hr(S_OK);
 
-    assert(_ptrEnumerator != NULL);
+  assert(_ptrEnumerator != NULL);
 
-    IMMDeviceCollection *pCollection = NULL;
+  IMMDeviceCollection* pCollection = NULL;
 
-    hr = _ptrEnumerator->EnumAudioEndpoints(
-                               dir,
-                               DEVICE_STATE_ACTIVE,        // only active endpoints are OK
-                               &pCollection);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pCollection);
-        return -1;
-    }
+  hr = _ptrEnumerator->EnumAudioEndpoints(
+      dir,
+      DEVICE_STATE_ACTIVE,  // only active endpoints are OK
+      &pCollection);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    SAFE_RELEASE(pCollection);
+    return -1;
+  }
 
-    hr = pCollection->Item(
-                        index,
-                        ppDevice);
-    if (FAILED(hr))
-    {
-        _TraceCOMError(hr);
-        SAFE_RELEASE(pCollection);
-        return -1;
-    }
+  hr = pCollection->Item(index, ppDevice);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    SAFE_RELEASE(pCollection);
+    return -1;
+  }
 
-    return 0;
+  return 0;
 }
 
 // ----------------------------------------------------------------------------
 //  _EnumerateEndpointDevicesAll
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll(EDataFlow dataFlow) const
-{
-    LOG(LS_VERBOSE) << __FUNCTION__;
+int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll(
+    EDataFlow dataFlow) const {
+  LOG(LS_VERBOSE) << __FUNCTION__;
 
-    assert(_ptrEnumerator != NULL);
+  assert(_ptrEnumerator != NULL);
 
-    HRESULT hr = S_OK;
-    IMMDeviceCollection *pCollection = NULL;
-    IMMDevice *pEndpoint = NULL;
-    IPropertyStore *pProps = NULL;
-    IAudioEndpointVolume* pEndpointVolume = NULL;
-    LPWSTR pwszID = NULL;
+  HRESULT hr = S_OK;
+  IMMDeviceCollection* pCollection = NULL;
+  IMMDevice* pEndpoint = NULL;
+  IPropertyStore* pProps = NULL;
+  IAudioEndpointVolume* pEndpointVolume = NULL;
+  LPWSTR pwszID = NULL;
 
-    // Generate a collection of audio endpoint devices in the system.
-    // Get states for *all* endpoint devices.
-    // Output: IMMDeviceCollection interface.
-    hr = _ptrEnumerator->EnumAudioEndpoints(
-                                 dataFlow,            // data-flow direction (input parameter)
-                                 DEVICE_STATE_ACTIVE | DEVICE_STATE_DISABLED | DEVICE_STATE_UNPLUGGED,
-                                 &pCollection);        // release interface when done
+  // Generate a collection of audio endpoint devices in the system.
+  // Get states for *all* endpoint devices.
+  // Output: IMMDeviceCollection interface.
+  hr = _ptrEnumerator->EnumAudioEndpoints(
+      dataFlow,  // data-flow direction (input parameter)
+      DEVICE_STATE_ACTIVE | DEVICE_STATE_DISABLED | DEVICE_STATE_UNPLUGGED,
+      &pCollection);  // release interface when done
 
-    EXIT_ON_ERROR(hr);
+  EXIT_ON_ERROR(hr);
 
-    // use the IMMDeviceCollection interface...
+  // use the IMMDeviceCollection interface...
 
-    UINT count = 0;
+  UINT count = 0;
 
-    // Retrieve a count of the devices in the device collection.
-    hr = pCollection->GetCount(&count);
-    EXIT_ON_ERROR(hr);
-    if (dataFlow == eRender)
-        LOG(LS_VERBOSE) << "#rendering endpoint devices (counting all): "
-                        << count;
-    else if (dataFlow == eCapture)
-        LOG(LS_VERBOSE) << "#capturing endpoint devices (counting all): "
-                        << count;
+  // Retrieve a count of the devices in the device collection.
+  hr = pCollection->GetCount(&count);
+  EXIT_ON_ERROR(hr);
+  if (dataFlow == eRender)
+    LOG(LS_VERBOSE) << "#rendering endpoint devices (counting all): " << count;
+  else if (dataFlow == eCapture)
+    LOG(LS_VERBOSE) << "#capturing endpoint devices (counting all): " << count;
 
-    if (count == 0)
-    {
-        return 0;
-    }
-
-    // Each loop prints the name of an endpoint device.
-    for (ULONG i = 0; i < count; i++)
-    {
-        LOG(LS_VERBOSE) << "Endpoint " << i << ":";
-
-        // Get pointer to endpoint number i.
-        // Output: IMMDevice interface.
-        hr = pCollection->Item(
-                            i,
-                            &pEndpoint);
-        CONTINUE_ON_ERROR(hr);
-
-        // use the IMMDevice interface of the specified endpoint device...
-
-        // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
-        hr = pEndpoint->GetId(&pwszID);
-        CONTINUE_ON_ERROR(hr);
-        LOG(LS_VERBOSE) << "ID string    : " << pwszID;
-
-        // Retrieve an interface to the device's property store.
-        // Output: IPropertyStore interface.
-        hr = pEndpoint->OpenPropertyStore(
-                          STGM_READ,
-                          &pProps);
-        CONTINUE_ON_ERROR(hr);
-
-        // use the IPropertyStore interface...
-
-        PROPVARIANT varName;
-        // Initialize container for property value.
-        PropVariantInit(&varName);
-
-        // Get the endpoint's friendly-name property.
-        // Example: "Speakers (Realtek High Definition Audio)"
-        hr = pProps->GetValue(
-                       PKEY_Device_FriendlyName,
-                       &varName);
-        CONTINUE_ON_ERROR(hr);
-        LOG(LS_VERBOSE) << "friendly name: \"" << varName.pwszVal << "\"";
-
-        // Get the endpoint's current device state
-        DWORD dwState;
-        hr = pEndpoint->GetState(&dwState);
-        CONTINUE_ON_ERROR(hr);
-        if (dwState & DEVICE_STATE_ACTIVE)
-            LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
-                            << ")  : *ACTIVE*";
-        if (dwState & DEVICE_STATE_DISABLED)
-            LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
-                            << ")  : DISABLED";
-        if (dwState & DEVICE_STATE_NOTPRESENT)
-            LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
-                            << ")  : NOTPRESENT";
-        if (dwState & DEVICE_STATE_UNPLUGGED)
-            LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
-                            << ")  : UNPLUGGED";
-
-        // Check the hardware volume capabilities.
-        DWORD dwHwSupportMask = 0;
-        hr = pEndpoint->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
-                               NULL, (void**)&pEndpointVolume);
-        CONTINUE_ON_ERROR(hr);
-        hr = pEndpointVolume->QueryHardwareSupport(&dwHwSupportMask);
-        CONTINUE_ON_ERROR(hr);
-        if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
-            // The audio endpoint device supports a hardware volume control
-            LOG(LS_VERBOSE) << "hwmask (0x" << std::hex << dwHwSupportMask
-                            << std::dec << ") : HARDWARE_SUPPORT_VOLUME";
-        if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_MUTE)
-            // The audio endpoint device supports a hardware mute control
-            LOG(LS_VERBOSE) << "hwmask (0x" << std::hex << dwHwSupportMask
-                            << std::dec << ") : HARDWARE_SUPPORT_MUTE";
-        if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_METER)
-            // The audio endpoint device supports a hardware peak meter
-            LOG(LS_VERBOSE) << "hwmask (0x" << std::hex << dwHwSupportMask
-                            << std::dec << ") : HARDWARE_SUPPORT_METER";
-
-        // Check the channel count (#channels in the audio stream that enters or leaves the audio endpoint device)
-        UINT nChannelCount(0);
-        hr = pEndpointVolume->GetChannelCount(
-                                &nChannelCount);
-        CONTINUE_ON_ERROR(hr);
-        LOG(LS_VERBOSE) << "#channels    : " << nChannelCount;
-
-        if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
-        {
-            // Get the volume range.
-            float fLevelMinDB(0.0);
-            float fLevelMaxDB(0.0);
-            float fVolumeIncrementDB(0.0);
-            hr = pEndpointVolume->GetVolumeRange(
-                                    &fLevelMinDB,
-                                    &fLevelMaxDB,
-                                    &fVolumeIncrementDB);
-            CONTINUE_ON_ERROR(hr);
-            LOG(LS_VERBOSE) << "volume range : " << fLevelMinDB << " (min), "
-                            << fLevelMaxDB << " (max), " << fVolumeIncrementDB
-                            << " (inc) [dB]";
-
-            // The volume range from vmin = fLevelMinDB to vmax = fLevelMaxDB is divided
-            // into n uniform intervals of size vinc = fVolumeIncrementDB, where
-            // n = (vmax ?vmin) / vinc.
-            // The values vmin, vmax, and vinc are measured in decibels. The client can set
-            // the volume level to one of n + 1 discrete values in the range from vmin to vmax.
-            int n = (int)((fLevelMaxDB-fLevelMinDB)/fVolumeIncrementDB);
-            LOG(LS_VERBOSE) << "#intervals   : " << n;
-
-            // Get information about the current step in the volume range.
-            // This method represents the volume level of the audio stream that enters or leaves
-            // the audio endpoint device as an index or "step" in a range of discrete volume levels.
-            // Output value nStepCount is the number of steps in the range. Output value nStep
-            // is the step index of the current volume level. If the number of steps is n = nStepCount,
-            // then step index nStep can assume values from 0 (minimum volume) to n ?1 (maximum volume).
-            UINT nStep(0);
-            UINT nStepCount(0);
-            hr = pEndpointVolume->GetVolumeStepInfo(
-                                    &nStep,
-                                    &nStepCount);
-            CONTINUE_ON_ERROR(hr);
-            LOG(LS_VERBOSE) << "volume steps : " << nStep << " (nStep), "
-                            << nStepCount << " (nStepCount)";
-        }
-Next:
-        if (FAILED(hr)) {
-          LOG(LS_VERBOSE) << "Error when logging device information";
-        }
-        CoTaskMemFree(pwszID);
-        pwszID = NULL;
-        PropVariantClear(&varName);
-        SAFE_RELEASE(pProps);
-        SAFE_RELEASE(pEndpoint);
-        SAFE_RELEASE(pEndpointVolume);
-    }
-    SAFE_RELEASE(pCollection);
+  if (count == 0) {
     return 0;
+  }
 
-Exit:
-    _TraceCOMError(hr);
+  // Each loop prints the name of an endpoint device.
+  for (ULONG i = 0; i < count; i++) {
+    LOG(LS_VERBOSE) << "Endpoint " << i << ":";
+
+    // Get pointer to endpoint number i.
+    // Output: IMMDevice interface.
+    hr = pCollection->Item(i, &pEndpoint);
+    CONTINUE_ON_ERROR(hr);
+
+    // use the IMMDevice interface of the specified endpoint device...
+
+    // Get the endpoint ID string (uniquely identifies the device among all
+    // audio endpoint devices)
+    hr = pEndpoint->GetId(&pwszID);
+    CONTINUE_ON_ERROR(hr);
+    LOG(LS_VERBOSE) << "ID string    : " << pwszID;
+
+    // Retrieve an interface to the device's property store.
+    // Output: IPropertyStore interface.
+    hr = pEndpoint->OpenPropertyStore(STGM_READ, &pProps);
+    CONTINUE_ON_ERROR(hr);
+
+    // use the IPropertyStore interface...
+
+    PROPVARIANT varName;
+    // Initialize container for property value.
+    PropVariantInit(&varName);
+
+    // Get the endpoint's friendly-name property.
+    // Example: "Speakers (Realtek High Definition Audio)"
+    hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
+    CONTINUE_ON_ERROR(hr);
+    LOG(LS_VERBOSE) << "friendly name: \"" << varName.pwszVal << "\"";
+
+    // Get the endpoint's current device state
+    DWORD dwState;
+    hr = pEndpoint->GetState(&dwState);
+    CONTINUE_ON_ERROR(hr);
+    if (dwState & DEVICE_STATE_ACTIVE)
+      LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
+                      << ")  : *ACTIVE*";
+    if (dwState & DEVICE_STATE_DISABLED)
+      LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
+                      << ")  : DISABLED";
+    if (dwState & DEVICE_STATE_NOTPRESENT)
+      LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
+                      << ")  : NOTPRESENT";
+    if (dwState & DEVICE_STATE_UNPLUGGED)
+      LOG(LS_VERBOSE) << "state (0x" << std::hex << dwState << std::dec
+                      << ")  : UNPLUGGED";
+
+    // Check the hardware volume capabilities.
+    DWORD dwHwSupportMask = 0;
+    hr = pEndpoint->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                             (void**)&pEndpointVolume);
+    CONTINUE_ON_ERROR(hr);
+    hr = pEndpointVolume->QueryHardwareSupport(&dwHwSupportMask);
+    CONTINUE_ON_ERROR(hr);
+    if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
+      // The audio endpoint device supports a hardware volume control
+      LOG(LS_VERBOSE) << "hwmask (0x" << std::hex << dwHwSupportMask << std::dec
+                      << ") : HARDWARE_SUPPORT_VOLUME";
+    if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_MUTE)
+      // The audio endpoint device supports a hardware mute control
+      LOG(LS_VERBOSE) << "hwmask (0x" << std::hex << dwHwSupportMask << std::dec
+                      << ") : HARDWARE_SUPPORT_MUTE";
+    if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_METER)
+      // The audio endpoint device supports a hardware peak meter
+      LOG(LS_VERBOSE) << "hwmask (0x" << std::hex << dwHwSupportMask << std::dec
+                      << ") : HARDWARE_SUPPORT_METER";
+
+    // Check the channel count (#channels in the audio stream that enters or
+    // leaves the audio endpoint device)
+    UINT nChannelCount(0);
+    hr = pEndpointVolume->GetChannelCount(&nChannelCount);
+    CONTINUE_ON_ERROR(hr);
+    LOG(LS_VERBOSE) << "#channels    : " << nChannelCount;
+
+    if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME) {
+      // Get the volume range.
+      float fLevelMinDB(0.0);
+      float fLevelMaxDB(0.0);
+      float fVolumeIncrementDB(0.0);
+      hr = pEndpointVolume->GetVolumeRange(&fLevelMinDB, &fLevelMaxDB,
+                                           &fVolumeIncrementDB);
+      CONTINUE_ON_ERROR(hr);
+      LOG(LS_VERBOSE) << "volume range : " << fLevelMinDB << " (min), "
+                      << fLevelMaxDB << " (max), " << fVolumeIncrementDB
+                      << " (inc) [dB]";
+
+      // The volume range from vmin = fLevelMinDB to vmax = fLevelMaxDB is
+      // divided into n uniform intervals of size vinc = fVolumeIncrementDB,
+      // where n = (vmax - vmin) / vinc. The values vmin, vmax, and vinc are
+      // measured in decibels. The client can set the volume level to one of n +
+      // 1 discrete values in the range from vmin to vmax.
+      int n = (int)((fLevelMaxDB - fLevelMinDB) / fVolumeIncrementDB);
+      LOG(LS_VERBOSE) << "#intervals   : " << n;
+
+      // Get information about the current step in the volume range.
+      // This method represents the volume level of the audio stream that enters
+      // or leaves the audio endpoint device as an index or "step" in a range of
+      // discrete volume levels. Output value nStepCount is the number of steps
+      // in the range. Output value nStep is the step index of the current
+      // volume level. If the number of steps is n = nStepCount, then step index
+      // nStep can assume values from 0 (minimum volume) to n - 1 (maximum
+      // volume).
+      UINT nStep(0);
+      UINT nStepCount(0);
+      hr = pEndpointVolume->GetVolumeStepInfo(&nStep, &nStepCount);
+      CONTINUE_ON_ERROR(hr);
+      LOG(LS_VERBOSE) << "volume steps : " << nStep << " (nStep), "
+                      << nStepCount << " (nStepCount)";
+    }
+  Next:
+    if (FAILED(hr)) {
+      LOG(LS_VERBOSE) << "Error when logging device information";
+    }
     CoTaskMemFree(pwszID);
     pwszID = NULL;
-    SAFE_RELEASE(pCollection);
+    PropVariantClear(&varName);
+    SAFE_RELEASE(pProps);
     SAFE_RELEASE(pEndpoint);
     SAFE_RELEASE(pEndpointVolume);
-    SAFE_RELEASE(pProps);
-    return -1;
+  }
+  SAFE_RELEASE(pCollection);
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  CoTaskMemFree(pwszID);
+  pwszID = NULL;
+  SAFE_RELEASE(pCollection);
+  SAFE_RELEASE(pEndpoint);
+  SAFE_RELEASE(pEndpointVolume);
+  SAFE_RELEASE(pProps);
+  return -1;
 }
 
 // ----------------------------------------------------------------------------
 //  _TraceCOMError
 // ----------------------------------------------------------------------------
 
-void AudioDeviceWindowsCore::_TraceCOMError(HRESULT hr) const
-{
-    TCHAR buf[MAXERRORLENGTH];
-    TCHAR errorText[MAXERRORLENGTH];
+void AudioDeviceWindowsCore::_TraceCOMError(HRESULT hr) const {
+  TCHAR buf[MAXERRORLENGTH];
+  TCHAR errorText[MAXERRORLENGTH];
 
-    const DWORD dwFlags = FORMAT_MESSAGE_FROM_SYSTEM |
-                          FORMAT_MESSAGE_IGNORE_INSERTS;
-    const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
+  const DWORD dwFlags =
+      FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+  const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
 
-    // Gets the system's human readable message string for this HRESULT.
-    // All error message in English by default.
-    DWORD messageLength = ::FormatMessageW(dwFlags,
-                                           0,
-                                           hr,
-                                           dwLangID,
-                                           errorText,
-                                           MAXERRORLENGTH,
-                                           NULL);
+  // Gets the system's human readable message string for this HRESULT.
+  // All error messages are in English by default.
+  DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText,
+                                         MAXERRORLENGTH, NULL);
 
-    assert(messageLength <= MAXERRORLENGTH);
+  assert(messageLength <= MAXERRORLENGTH);
 
-    // Trims tailing white space (FormatMessage() leaves a trailing cr-lf.).
-    for (; messageLength && ::isspace(errorText[messageLength - 1]);
-         --messageLength)
-    {
-        errorText[messageLength - 1] = '\0';
-    }
+  // Trims trailing white space (FormatMessage() leaves a trailing cr-lf.).
+  for (; messageLength && ::isspace(errorText[messageLength - 1]);
+       --messageLength) {
+    errorText[messageLength - 1] = '\0';
+  }
 
-    LOG(LS_ERROR) << "Core Audio method failed (hr=" << hr << ")";
-    StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
-    StringCchCat(buf, MAXERRORLENGTH, errorText);
-    LOG(LS_ERROR) << WideToUTF8(buf);
+  LOG(LS_ERROR) << "Core Audio method failed (hr=" << hr << ")";
+  StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
+  StringCchCat(buf, MAXERRORLENGTH, errorText);
+  LOG(LS_ERROR) << WideToUTF8(buf);
 }
 
 // ----------------------------------------------------------------------------
@@ -4838,29 +4310,27 @@
 
 char* AudioDeviceWindowsCore::WideToUTF8(const TCHAR* src) const {
 #ifdef UNICODE
-    const size_t kStrLen = sizeof(_str);
-    memset(_str, 0, kStrLen);
-    // Get required size (in bytes) to be able to complete the conversion.
-    unsigned int required_size = (unsigned int)WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, 0, 0, 0);
-    if (required_size <= kStrLen)
-    {
-        // Process the entire input string, including the terminating null char.
-        if (WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, kStrLen, 0, 0) == 0)
-            memset(_str, 0, kStrLen);
-    }
-    return _str;
+  const size_t kStrLen = sizeof(_str);
+  memset(_str, 0, kStrLen);
+  // Get required size (in bytes) to be able to complete the conversion.
+  unsigned int required_size =
+      (unsigned int)WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, 0, 0, 0);
+  if (required_size <= kStrLen) {
+    // Process the entire input string, including the terminating null char.
+    if (WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, kStrLen, 0, 0) == 0)
+      memset(_str, 0, kStrLen);
+  }
+  return _str;
 #else
-    return const_cast<char*>(src);
+  return const_cast<char*>(src);
 #endif
 }
 
-
-bool AudioDeviceWindowsCore::KeyPressed() const{
-
+bool AudioDeviceWindowsCore::KeyPressed() const {
   int key_down = 0;
   for (int key = VK_SPACE; key < VK_NUMLOCK; key++) {
     short res = GetAsyncKeyState(key);
-    key_down |= res & 0x1; // Get the LSB
+    key_down |= res & 0x1;  // Get the LSB
   }
   return (key_down > 0);
 }
diff --git a/modules/media_file/media_file_impl.cc b/modules/media_file/media_file_impl.cc
index 80b407d..7aaf30d 100644
--- a/modules/media_file/media_file_impl.cc
+++ b/modules/media_file/media_file_impl.cc
@@ -16,14 +16,12 @@
 #include "system_wrappers/include/file_wrapper.h"
 
 namespace webrtc {
-MediaFile* MediaFile::CreateMediaFile(const int32_t id)
-{
-    return new MediaFileImpl(id);
+MediaFile* MediaFile::CreateMediaFile(const int32_t id) {
+  return new MediaFileImpl(id);
 }
 
-void MediaFile::DestroyMediaFile(MediaFile* module)
-{
-    delete static_cast<MediaFileImpl*>(module);
+void MediaFile::DestroyMediaFile(MediaFile* module) {
+  delete static_cast<MediaFileImpl*>(module);
 }
 
 MediaFileImpl::MediaFileImpl(const int32_t id)
@@ -41,330 +39,270 @@
       _isStereo(false),
       _openFile(false),
       _fileName(),
-      _ptrCallback(NULL)
-{
-    LOG(LS_INFO) << "MediaFileImpl()";
+      _ptrCallback(NULL) {
+  LOG(LS_INFO) << "MediaFileImpl()";
 
-    codec_info_.plname[0] = '\0';
-    _fileName[0] = '\0';
+  codec_info_.plname[0] = '\0';
+  _fileName[0] = '\0';
 }
 
+MediaFileImpl::~MediaFileImpl() {
+  LOG(LS_INFO) << "~MediaFileImpl()";
+  {
+    rtc::CritScope lock(&_crit);
 
-MediaFileImpl::~MediaFileImpl()
-{
-    LOG(LS_INFO) << "~MediaFileImpl()";
-    {
-        rtc::CritScope lock(&_crit);
-
-        if(_playingActive)
-        {
-            StopPlaying();
-        }
-
-        if(_recordingActive)
-        {
-            StopRecording();
-        }
-
-        delete _ptrFileUtilityObj;
-
-        if(_openFile)
-        {
-            delete _ptrInStream;
-            _ptrInStream = NULL;
-            delete _ptrOutStream;
-            _ptrOutStream = NULL;
-        }
+    if (_playingActive) {
+      StopPlaying();
     }
+
+    if (_recordingActive) {
+      StopRecording();
+    }
+
+    delete _ptrFileUtilityObj;
+
+    if (_openFile) {
+      delete _ptrInStream;
+      _ptrInStream = NULL;
+      delete _ptrOutStream;
+      _ptrOutStream = NULL;
+    }
+  }
 }
 
-int64_t MediaFileImpl::TimeUntilNextProcess()
-{
-    LOG(LS_WARNING)
-        << "TimeUntilNextProcess: This method is not used by MediaFile class.";
-    return -1;
+int64_t MediaFileImpl::TimeUntilNextProcess() {
+  LOG(LS_WARNING)
+      << "TimeUntilNextProcess: This method is not used by MediaFile class.";
+  return -1;
 }
 
-void MediaFileImpl::Process()
-{
-    LOG(LS_WARNING) << "Process: This method is not used by MediaFile class.";
+void MediaFileImpl::Process() {
+  LOG(LS_WARNING) << "Process: This method is not used by MediaFile class.";
 }
 
 int32_t MediaFileImpl::PlayoutAudioData(int8_t* buffer,
-                                        size_t& dataLengthInBytes)
-{
-    LOG(LS_INFO) << "MediaFileImpl::PlayoutData(buffer= "
-                 << static_cast<void*>(buffer)
-                 << ", bufLen= " << dataLengthInBytes << ")";
+                                        size_t& dataLengthInBytes) {
+  LOG(LS_INFO) << "MediaFileImpl::PlayoutData(buffer= "
+               << static_cast<void*>(buffer)
+               << ", bufLen= " << dataLengthInBytes << ")";
 
-    const size_t bufferLengthInBytes = dataLengthInBytes;
-    dataLengthInBytes = 0;
+  const size_t bufferLengthInBytes = dataLengthInBytes;
+  dataLengthInBytes = 0;
 
-    if(buffer == NULL || bufferLengthInBytes == 0)
-    {
-        LOG(LS_ERROR) << "Buffer pointer or length is NULL!";
-        return -1;
-    }
+  if (buffer == NULL || bufferLengthInBytes == 0) {
+    LOG(LS_ERROR) << "Buffer pointer or length is NULL!";
+    return -1;
+  }
 
-    int32_t bytesRead = 0;
-    {
-        rtc::CritScope lock(&_crit);
+  int32_t bytesRead = 0;
+  {
+    rtc::CritScope lock(&_crit);
 
-        if(!_playingActive)
-        {
-            LOG(LS_WARNING) << "Not currently playing!";
-            return -1;
-        }
-
-        if(!_ptrFileUtilityObj)
-        {
-            LOG(LS_ERROR) << "Playing, but no FileUtility object!";
-            StopPlaying();
-            return -1;
-        }
-
-        switch(_fileFormat)
-        {
-            case kFileFormatPcm48kHzFile:
-            case kFileFormatPcm32kHzFile:
-            case kFileFormatPcm16kHzFile:
-            case kFileFormatPcm8kHzFile:
-                bytesRead = _ptrFileUtilityObj->ReadPCMData(
-                    *_ptrInStream,
-                    buffer,
-                    bufferLengthInBytes);
-                break;
-            case kFileFormatCompressedFile:
-                bytesRead = _ptrFileUtilityObj->ReadCompressedData(
-                    *_ptrInStream,
-                    buffer,
-                    bufferLengthInBytes);
-                break;
-            case kFileFormatWavFile:
-                bytesRead = _ptrFileUtilityObj->ReadWavDataAsMono(
-                    *_ptrInStream,
-                    buffer,
-                    bufferLengthInBytes);
-                break;
-            case kFileFormatPreencodedFile:
-                bytesRead = _ptrFileUtilityObj->ReadPreEncodedData(
-                    *_ptrInStream,
-                    buffer,
-                    bufferLengthInBytes);
-                if(bytesRead > 0)
-                {
-                    dataLengthInBytes = static_cast<size_t>(bytesRead);
-                    return 0;
-                }
-                break;
-            default:
-            {
-                LOG(LS_ERROR) << "Invalid file format: " << _fileFormat;
-                assert(false);
-                break;
-            }
-        }
-
-        if( bytesRead > 0)
-        {
-            dataLengthInBytes = static_cast<size_t>(bytesRead);
-        }
-    }
-    HandlePlayCallbacks(bytesRead);
-    return 0;
-}
-
-void MediaFileImpl::HandlePlayCallbacks(int32_t bytesRead)
-{
-    bool playEnded = false;
-    uint32_t callbackNotifyMs = 0;
-
-    if(bytesRead > 0)
-    {
-        // Check if it's time for PlayNotification(..).
-        _playoutPositionMs = _ptrFileUtilityObj->PlayoutPositionMs();
-        if(_notificationMs)
-        {
-            if(_playoutPositionMs >= _notificationMs)
-            {
-                _notificationMs = 0;
-                callbackNotifyMs = _playoutPositionMs;
-            }
-        }
-    }
-    else
-    {
-        // If no bytes were read assume end of file.
-        StopPlaying();
-        playEnded = true;
-    }
-
-    // Only _callbackCrit may and should be taken when making callbacks.
-    rtc::CritScope lock(&_callbackCrit);
-    if(_ptrCallback)
-    {
-        if(callbackNotifyMs)
-        {
-            _ptrCallback->PlayNotification(_id, callbackNotifyMs);
-        }
-        if(playEnded)
-        {
-            _ptrCallback->PlayFileEnded(_id);
-        }
-    }
-}
-
-int32_t MediaFileImpl::PlayoutStereoData(
-    int8_t* bufferLeft,
-    int8_t* bufferRight,
-    size_t& dataLengthInBytes)
-{
-    LOG(LS_INFO)
-        << "MediaFileImpl::PlayoutStereoData(Left = "
-        << static_cast<void*>(bufferLeft) << ", Right = "
-        << static_cast<void*>(bufferRight) << ", Len= " << dataLengthInBytes
-        << ")";
-
-    const size_t bufferLengthInBytes = dataLengthInBytes;
-    dataLengthInBytes = 0;
-
-    if(bufferLeft == NULL || bufferRight == NULL || bufferLengthInBytes == 0)
-    {
-        LOG(LS_ERROR) << "A buffer pointer or the length is NULL!";
-        return -1;
-    }
-
-    bool playEnded = false;
-    uint32_t callbackNotifyMs = 0;
-    {
-        rtc::CritScope lock(&_crit);
-
-        if(!_playingActive || !_isStereo)
-        {
-            LOG(LS_WARNING) << "Not currently playing stereo!";
-            return -1;
-        }
-
-        if(!_ptrFileUtilityObj)
-        {
-            LOG(LS_ERROR)
-                << "Playing stereo, but the FileUtility objects is NULL!";
-            StopPlaying();
-            return -1;
-        }
-
-        // Stereo playout only supported for WAV files.
-        int32_t bytesRead = 0;
-        switch(_fileFormat)
-        {
-            case kFileFormatWavFile:
-                    bytesRead = _ptrFileUtilityObj->ReadWavDataAsStereo(
-                        *_ptrInStream,
-                        bufferLeft,
-                        bufferRight,
-                        bufferLengthInBytes);
-                    break;
-            default:
-                LOG(LS_ERROR)
-                    << "Trying to read non-WAV as stereo audio (not supported)";
-                break;
-        }
-
-        if(bytesRead > 0)
-        {
-            dataLengthInBytes = static_cast<size_t>(bytesRead);
-
-            // Check if it's time for PlayNotification(..).
-            _playoutPositionMs = _ptrFileUtilityObj->PlayoutPositionMs();
-            if(_notificationMs)
-            {
-                if(_playoutPositionMs >= _notificationMs)
-                {
-                    _notificationMs = 0;
-                    callbackNotifyMs = _playoutPositionMs;
-                }
-            }
-        }
-        else
-        {
-            // If no bytes were read assume end of file.
-            StopPlaying();
-            playEnded = true;
-        }
-    }
-
-    rtc::CritScope lock(&_callbackCrit);
-    if(_ptrCallback)
-    {
-        if(callbackNotifyMs)
-        {
-            _ptrCallback->PlayNotification(_id, callbackNotifyMs);
-        }
-        if(playEnded)
-        {
-            _ptrCallback->PlayFileEnded(_id);
-        }
-    }
-    return 0;
-}
-
-int32_t MediaFileImpl::StartPlayingAudioFile(
-    const char* fileName,
-    const uint32_t notificationTimeMs,
-    const bool loop,
-    const FileFormats format,
-    const CodecInst* codecInst,
-    const uint32_t startPointMs,
-    const uint32_t stopPointMs)
-{
-    if(!ValidFileName(fileName))
-    {
-        return -1;
-    }
-    if(!ValidFileFormat(format,codecInst))
-    {
-        return -1;
-    }
-    if(!ValidFilePositions(startPointMs,stopPointMs))
-    {
-        return -1;
-    }
-
-    // Check that the file will play longer than notificationTimeMs ms.
-    if((startPointMs && stopPointMs && !loop) &&
-       (notificationTimeMs > (stopPointMs - startPointMs)))
-    {
-        LOG(LS_ERROR) << "specified notification time is longer than amount of"
-                      << " ms that will be played";
-        return -1;
-    }
-
-    FileWrapper* inputStream = FileWrapper::Create();
-    if(inputStream == NULL)
-    {
-       LOG(LS_INFO) << "Failed to allocate input stream for file " << fileName;
-        return -1;
-    }
-
-    if (!inputStream->OpenFile(fileName, true)) {
-      delete inputStream;
-      LOG(LS_ERROR) << "Could not open input file " << fileName;
+    if (!_playingActive) {
+      LOG(LS_WARNING) << "Not currently playing!";
       return -1;
     }
 
-    if(StartPlayingStream(*inputStream, loop, notificationTimeMs,
-                          format, codecInst, startPointMs, stopPointMs) == -1)
-    {
-        inputStream->CloseFile();
-        delete inputStream;
-        return -1;
+    if (!_ptrFileUtilityObj) {
+      LOG(LS_ERROR) << "Playing, but no FileUtility object!";
+      StopPlaying();
+      return -1;
     }
 
+    switch (_fileFormat) {
+      case kFileFormatPcm48kHzFile:
+      case kFileFormatPcm32kHzFile:
+      case kFileFormatPcm16kHzFile:
+      case kFileFormatPcm8kHzFile:
+        bytesRead = _ptrFileUtilityObj->ReadPCMData(*_ptrInStream, buffer,
+                                                    bufferLengthInBytes);
+        break;
+      case kFileFormatCompressedFile:
+        bytesRead = _ptrFileUtilityObj->ReadCompressedData(
+            *_ptrInStream, buffer, bufferLengthInBytes);
+        break;
+      case kFileFormatWavFile:
+        bytesRead = _ptrFileUtilityObj->ReadWavDataAsMono(*_ptrInStream, buffer,
+                                                          bufferLengthInBytes);
+        break;
+      case kFileFormatPreencodedFile:
+        bytesRead = _ptrFileUtilityObj->ReadPreEncodedData(
+            *_ptrInStream, buffer, bufferLengthInBytes);
+        if (bytesRead > 0) {
+          dataLengthInBytes = static_cast<size_t>(bytesRead);
+          return 0;
+        }
+        break;
+      default: {
+        LOG(LS_ERROR) << "Invalid file format: " << _fileFormat;
+        assert(false);
+        break;
+      }
+    }
+
+    if (bytesRead > 0) {
+      dataLengthInBytes = static_cast<size_t>(bytesRead);
+    }
+  }
+  HandlePlayCallbacks(bytesRead);
+  return 0;
+}
+
+void MediaFileImpl::HandlePlayCallbacks(int32_t bytesRead) {
+  bool playEnded = false;
+  uint32_t callbackNotifyMs = 0;
+
+  if (bytesRead > 0) {
+    // Check if it's time for PlayNotification(..).
+    _playoutPositionMs = _ptrFileUtilityObj->PlayoutPositionMs();
+    if (_notificationMs) {
+      if (_playoutPositionMs >= _notificationMs) {
+        _notificationMs = 0;
+        callbackNotifyMs = _playoutPositionMs;
+      }
+    }
+  } else {
+    // If no bytes were read assume end of file.
+    StopPlaying();
+    playEnded = true;
+  }
+
+  // Only _callbackCrit may and should be taken when making callbacks.
+  rtc::CritScope lock(&_callbackCrit);
+  if (_ptrCallback) {
+    if (callbackNotifyMs) {
+      _ptrCallback->PlayNotification(_id, callbackNotifyMs);
+    }
+    if (playEnded) {
+      _ptrCallback->PlayFileEnded(_id);
+    }
+  }
+}
+
+int32_t MediaFileImpl::PlayoutStereoData(int8_t* bufferLeft,
+                                         int8_t* bufferRight,
+                                         size_t& dataLengthInBytes) {
+  LOG(LS_INFO) << "MediaFileImpl::PlayoutStereoData(Left = "
+               << static_cast<void*>(bufferLeft)
+               << ", Right = " << static_cast<void*>(bufferRight)
+               << ", Len= " << dataLengthInBytes << ")";
+
+  const size_t bufferLengthInBytes = dataLengthInBytes;
+  dataLengthInBytes = 0;
+
+  if (bufferLeft == NULL || bufferRight == NULL || bufferLengthInBytes == 0) {
+    LOG(LS_ERROR) << "A buffer pointer or the length is NULL!";
+    return -1;
+  }
+
+  bool playEnded = false;
+  uint32_t callbackNotifyMs = 0;
+  {
     rtc::CritScope lock(&_crit);
-    _openFile = true;
-    strncpy(_fileName, fileName, sizeof(_fileName));
-    _fileName[sizeof(_fileName) - 1] = '\0';
-    return 0;
+
+    if (!_playingActive || !_isStereo) {
+      LOG(LS_WARNING) << "Not currently playing stereo!";
+      return -1;
+    }
+
+    if (!_ptrFileUtilityObj) {
+      LOG(LS_ERROR) << "Playing stereo, but the FileUtility objects is NULL!";
+      StopPlaying();
+      return -1;
+    }
+
+    // Stereo playout only supported for WAV files.
+    int32_t bytesRead = 0;
+    switch (_fileFormat) {
+      case kFileFormatWavFile:
+        bytesRead = _ptrFileUtilityObj->ReadWavDataAsStereo(
+            *_ptrInStream, bufferLeft, bufferRight, bufferLengthInBytes);
+        break;
+      default:
+        LOG(LS_ERROR)
+            << "Trying to read non-WAV as stereo audio (not supported)";
+        break;
+    }
+
+    if (bytesRead > 0) {
+      dataLengthInBytes = static_cast<size_t>(bytesRead);
+
+      // Check if it's time for PlayNotification(..).
+      _playoutPositionMs = _ptrFileUtilityObj->PlayoutPositionMs();
+      if (_notificationMs) {
+        if (_playoutPositionMs >= _notificationMs) {
+          _notificationMs = 0;
+          callbackNotifyMs = _playoutPositionMs;
+        }
+      }
+    } else {
+      // If no bytes were read assume end of file.
+      StopPlaying();
+      playEnded = true;
+    }
+  }
+
+  rtc::CritScope lock(&_callbackCrit);
+  if (_ptrCallback) {
+    if (callbackNotifyMs) {
+      _ptrCallback->PlayNotification(_id, callbackNotifyMs);
+    }
+    if (playEnded) {
+      _ptrCallback->PlayFileEnded(_id);
+    }
+  }
+  return 0;
+}
+
+int32_t MediaFileImpl::StartPlayingAudioFile(const char* fileName,
+                                             const uint32_t notificationTimeMs,
+                                             const bool loop,
+                                             const FileFormats format,
+                                             const CodecInst* codecInst,
+                                             const uint32_t startPointMs,
+                                             const uint32_t stopPointMs) {
+  if (!ValidFileName(fileName)) {
+    return -1;
+  }
+  if (!ValidFileFormat(format, codecInst)) {
+    return -1;
+  }
+  if (!ValidFilePositions(startPointMs, stopPointMs)) {
+    return -1;
+  }
+
+  // Check that the file will play longer than notificationTimeMs ms.
+  if ((startPointMs && stopPointMs && !loop) &&
+      (notificationTimeMs > (stopPointMs - startPointMs))) {
+    LOG(LS_ERROR) << "specified notification time is longer than amount of"
+                  << " ms that will be played";
+    return -1;
+  }
+
+  FileWrapper* inputStream = FileWrapper::Create();
+  if (inputStream == NULL) {
+    LOG(LS_INFO) << "Failed to allocate input stream for file " << fileName;
+    return -1;
+  }
+
+  if (!inputStream->OpenFile(fileName, true)) {
+    delete inputStream;
+    LOG(LS_ERROR) << "Could not open input file " << fileName;
+    return -1;
+  }
+
+  if (StartPlayingStream(*inputStream, loop, notificationTimeMs, format,
+                         codecInst, startPointMs, stopPointMs) == -1) {
+    inputStream->CloseFile();
+    delete inputStream;
+    return -1;
+  }
+
+  rtc::CritScope lock(&_crit);
+  _openFile = true;
+  strncpy(_fileName, fileName, sizeof(_fileName));
+  _fileName[sizeof(_fileName) - 1] = '\0';
+  return 0;
 }
 
 int32_t MediaFileImpl::StartPlayingAudioStream(
@@ -373,311 +311,260 @@
     const FileFormats format,
     const CodecInst* codecInst,
     const uint32_t startPointMs,
-    const uint32_t stopPointMs)
-{
-    return StartPlayingStream(stream, false, notificationTimeMs, format,
-                              codecInst, startPointMs, stopPointMs);
+    const uint32_t stopPointMs) {
+  return StartPlayingStream(stream, false, notificationTimeMs, format,
+                            codecInst, startPointMs, stopPointMs);
 }
 
-int32_t MediaFileImpl::StartPlayingStream(
-    InStream& stream,
-    bool loop,
-    const uint32_t notificationTimeMs,
-    const FileFormats format,
-    const CodecInst*  codecInst,
-    const uint32_t startPointMs,
-    const uint32_t stopPointMs)
-{
-    if(!ValidFileFormat(format,codecInst))
-    {
-        return -1;
-    }
+int32_t MediaFileImpl::StartPlayingStream(InStream& stream,
+                                          bool loop,
+                                          const uint32_t notificationTimeMs,
+                                          const FileFormats format,
+                                          const CodecInst* codecInst,
+                                          const uint32_t startPointMs,
+                                          const uint32_t stopPointMs) {
+  if (!ValidFileFormat(format, codecInst)) {
+    return -1;
+  }
 
-    if(!ValidFilePositions(startPointMs,stopPointMs))
-    {
-        return -1;
-    }
+  if (!ValidFilePositions(startPointMs, stopPointMs)) {
+    return -1;
+  }
 
-    rtc::CritScope lock(&_crit);
-    if(_playingActive || _recordingActive)
-    {
-        LOG(LS_ERROR)
-            << "StartPlaying called, but already playing or recording file "
-            << ((_fileName[0] == '\0') ? "(name not set)" : _fileName);
-        return -1;
-    }
+  rtc::CritScope lock(&_crit);
+  if (_playingActive || _recordingActive) {
+    LOG(LS_ERROR)
+        << "StartPlaying called, but already playing or recording file "
+        << ((_fileName[0] == '\0') ? "(name not set)" : _fileName);
+    return -1;
+  }
 
-    if(_ptrFileUtilityObj != NULL)
-    {
-        LOG(LS_ERROR)
-            << "StartPlaying called, but FileUtilityObj already exists!";
+  if (_ptrFileUtilityObj != NULL) {
+    LOG(LS_ERROR) << "StartPlaying called, but FileUtilityObj already exists!";
+    StopPlaying();
+    return -1;
+  }
+
+  _ptrFileUtilityObj = new ModuleFileUtility();
+  if (_ptrFileUtilityObj == NULL) {
+    LOG(LS_INFO) << "Failed to create FileUtilityObj!";
+    return -1;
+  }
+
+  switch (format) {
+    case kFileFormatWavFile: {
+      if (_ptrFileUtilityObj->InitWavReading(stream, startPointMs,
+                                             stopPointMs) == -1) {
+        LOG(LS_ERROR) << "Not a valid WAV file!";
         StopPlaying();
         return -1;
+      }
+      _fileFormat = kFileFormatWavFile;
+      break;
     }
-
-    _ptrFileUtilityObj = new ModuleFileUtility();
-    if(_ptrFileUtilityObj == NULL)
-    {
-        LOG(LS_INFO) << "Failed to create FileUtilityObj!";
+    case kFileFormatCompressedFile: {
+      if (_ptrFileUtilityObj->InitCompressedReading(stream, startPointMs,
+                                                    stopPointMs) == -1) {
+        LOG(LS_ERROR) << "Not a valid Compressed file!";
+        StopPlaying();
         return -1;
+      }
+      _fileFormat = kFileFormatCompressedFile;
+      break;
+    }
+    case kFileFormatPcm8kHzFile:
+    case kFileFormatPcm16kHzFile:
+    case kFileFormatPcm32kHzFile:
+    case kFileFormatPcm48kHzFile: {
+      // ValidFileFormat() called in the beginning of this function
+      // prevents codecInst from being NULL here.
+      assert(codecInst != NULL);
+      if (!ValidFrequency(codecInst->plfreq) ||
+          _ptrFileUtilityObj->InitPCMReading(stream, startPointMs, stopPointMs,
+                                             codecInst->plfreq) == -1) {
+        LOG(LS_ERROR) << "Not a valid raw 8 or 16 KHz PCM file!";
+        StopPlaying();
+        return -1;
+      }
+
+      _fileFormat = format;
+      break;
+    }
+    case kFileFormatPreencodedFile: {
+      // ValidFileFormat() called in the beginning of this function
+      // prevents codecInst from being NULL here.
+      assert(codecInst != NULL);
+      if (_ptrFileUtilityObj->InitPreEncodedReading(stream, *codecInst) == -1) {
+        LOG(LS_ERROR) << "Not a valid PreEncoded file!";
+        StopPlaying();
+        return -1;
+      }
+
+      _fileFormat = kFileFormatPreencodedFile;
+      break;
+    }
+    default: {
+      LOG(LS_ERROR) << "Invalid file format: " << format;
+      assert(false);
+      break;
+    }
+  }
+  if (_ptrFileUtilityObj->codec_info(codec_info_) == -1) {
+    LOG(LS_ERROR) << "Failed to retrieve codec info!";
+    StopPlaying();
+    return -1;
+  }
+
+  _isStereo = (codec_info_.channels == 2);
+  if (_isStereo && (_fileFormat != kFileFormatWavFile)) {
+    LOG(LS_WARNING) << "Stereo is only allowed for WAV files";
+    StopPlaying();
+    return -1;
+  }
+  _playingActive = true;
+  _playoutPositionMs = _ptrFileUtilityObj->PlayoutPositionMs();
+  _ptrInStream = &stream;
+  _notificationMs = notificationTimeMs;
+
+  return 0;
+}
+
+int32_t MediaFileImpl::StopPlaying() {
+  rtc::CritScope lock(&_crit);
+  _isStereo = false;
+  if (_ptrFileUtilityObj) {
+    delete _ptrFileUtilityObj;
+    _ptrFileUtilityObj = NULL;
+  }
+  if (_ptrInStream) {
+    // If MediaFileImpl opened the InStream it must be reclaimed here.
+    if (_openFile) {
+      delete _ptrInStream;
+      _openFile = false;
+    }
+    _ptrInStream = NULL;
+  }
+
+  codec_info_.pltype = 0;
+  codec_info_.plname[0] = '\0';
+
+  if (!_playingActive) {
+    LOG(LS_WARNING) << "playing is not active!";
+    return -1;
+  }
+
+  _playingActive = false;
+  return 0;
+}
+
+bool MediaFileImpl::IsPlaying() {
+  LOG(LS_VERBOSE) << "MediaFileImpl::IsPlaying()";
+  rtc::CritScope lock(&_crit);
+  return _playingActive;
+}
+
+int32_t MediaFileImpl::IncomingAudioData(const int8_t* buffer,
+                                         const size_t bufferLengthInBytes) {
+  LOG(LS_INFO) << "MediaFile::IncomingData(buffer= "
+               << static_cast<const void*>(buffer)
+               << ", bufLen= " << bufferLengthInBytes << ")";
+
+  if (buffer == NULL || bufferLengthInBytes == 0) {
+    LOG(LS_ERROR) << "Buffer pointer or length is NULL!";
+    return -1;
+  }
+
+  bool recordingEnded = false;
+  uint32_t callbackNotifyMs = 0;
+  {
+    rtc::CritScope lock(&_crit);
+
+    if (!_recordingActive) {
+      LOG(LS_WARNING) << "Not currently recording!";
+      return -1;
+    }
+    if (_ptrOutStream == NULL) {
+      LOG(LS_ERROR) << "Recording is active, but output stream is NULL!";
+      assert(false);
+      return -1;
     }
 
-    switch(format)
-    {
-        case kFileFormatWavFile:
-        {
-            if(_ptrFileUtilityObj->InitWavReading(stream, startPointMs,
-                                                  stopPointMs) == -1)
-            {
-                LOG(LS_ERROR) << "Not a valid WAV file!";
-                StopPlaying();
-                return -1;
-            }
-            _fileFormat = kFileFormatWavFile;
-            break;
-        }
-        case kFileFormatCompressedFile:
-        {
-            if(_ptrFileUtilityObj->InitCompressedReading(stream, startPointMs,
-                                                         stopPointMs) == -1)
-            {
-                LOG(LS_ERROR) << "Not a valid Compressed file!";
-                StopPlaying();
-                return -1;
-            }
-            _fileFormat = kFileFormatCompressedFile;
-            break;
-        }
+    int32_t bytesWritten = 0;
+    uint32_t samplesWritten = codec_info_.pacsize;
+    if (_ptrFileUtilityObj) {
+      switch (_fileFormat) {
         case kFileFormatPcm8kHzFile:
         case kFileFormatPcm16kHzFile:
         case kFileFormatPcm32kHzFile:
         case kFileFormatPcm48kHzFile:
-        {
-            // ValidFileFormat() called in the beginneing of this function
-            // prevents codecInst from being NULL here.
-            assert(codecInst != NULL);
-            if(!ValidFrequency(codecInst->plfreq) ||
-               _ptrFileUtilityObj->InitPCMReading(stream, startPointMs,
-                                                  stopPointMs,
-                                                  codecInst->plfreq) == -1)
-            {
-                LOG(LS_ERROR) << "Not a valid raw 8 or 16 KHz PCM file!";
-                StopPlaying();
-                return -1;
-            }
+          bytesWritten = _ptrFileUtilityObj->WritePCMData(
+              *_ptrOutStream, buffer, bufferLengthInBytes);
 
-            _fileFormat = format;
-            break;
-        }
+          // Sample size is 2 bytes.
+          if (bytesWritten > 0) {
+            samplesWritten = bytesWritten / sizeof(int16_t);
+          }
+          break;
+        case kFileFormatCompressedFile:
+          bytesWritten = _ptrFileUtilityObj->WriteCompressedData(
+              *_ptrOutStream, buffer, bufferLengthInBytes);
+          break;
+        case kFileFormatWavFile:
+          bytesWritten = _ptrFileUtilityObj->WriteWavData(
+              *_ptrOutStream, buffer, bufferLengthInBytes);
+          if (bytesWritten > 0 &&
+              STR_NCASE_CMP(codec_info_.plname, "L16", 4) == 0) {
+            // Sample size is 2 bytes.
+            samplesWritten = bytesWritten / sizeof(int16_t);
+          }
+          break;
         case kFileFormatPreencodedFile:
-        {
-            // ValidFileFormat() called in the beginneing of this function
-            // prevents codecInst from being NULL here.
-            assert(codecInst != NULL);
-            if(_ptrFileUtilityObj->InitPreEncodedReading(stream, *codecInst) ==
-               -1)
-            {
-                LOG(LS_ERROR) << "Not a valid PreEncoded file!";
-                StopPlaying();
-                return -1;
-            }
-
-            _fileFormat = kFileFormatPreencodedFile;
-            break;
-        }
+          bytesWritten = _ptrFileUtilityObj->WritePreEncodedData(
+              *_ptrOutStream, buffer, bufferLengthInBytes);
+          break;
         default:
-        {
-            LOG(LS_ERROR) << "Invalid file format: " << format;
-            assert(false);
-            break;
+          LOG(LS_ERROR) << "Invalid file format: " << _fileFormat;
+          assert(false);
+          break;
+      }
+    } else {
+      // TODO(hellner): quick look at the code makes me think that this
+      //                 code is never executed. Remove?
+      if (_ptrOutStream) {
+        if (_ptrOutStream->Write(buffer, bufferLengthInBytes)) {
+          bytesWritten = static_cast<int32_t>(bufferLengthInBytes);
         }
-    }
-    if(_ptrFileUtilityObj->codec_info(codec_info_) == -1)
-    {
-        LOG(LS_ERROR) << "Failed to retrieve codec info!";
-        StopPlaying();
-        return -1;
+      }
     }
 
-    _isStereo = (codec_info_.channels == 2);
-    if(_isStereo && (_fileFormat != kFileFormatWavFile))
-    {
-        LOG(LS_WARNING) << "Stereo is only allowed for WAV files";
-        StopPlaying();
-        return -1;
+    _recordDurationMs += samplesWritten / (codec_info_.plfreq / 1000);
+
+    // Check if it's time for RecordNotification(..).
+    if (_notificationMs) {
+      if (_recordDurationMs >= _notificationMs) {
+        _notificationMs = 0;
+        callbackNotifyMs = _recordDurationMs;
+      }
     }
-    _playingActive = true;
-    _playoutPositionMs = _ptrFileUtilityObj->PlayoutPositionMs();
-    _ptrInStream = &stream;
-    _notificationMs = notificationTimeMs;
-
-    return 0;
-}
-
-int32_t MediaFileImpl::StopPlaying()
-{
-
-    rtc::CritScope lock(&_crit);
-    _isStereo = false;
-    if(_ptrFileUtilityObj)
-    {
-        delete _ptrFileUtilityObj;
-        _ptrFileUtilityObj = NULL;
+    if (bytesWritten < (int32_t)bufferLengthInBytes) {
+      LOG(LS_WARNING) << "Failed to write all requested bytes!";
+      StopRecording();
+      recordingEnded = true;
     }
-    if(_ptrInStream)
-    {
-        // If MediaFileImpl opened the InStream it must be reclaimed here.
-        if(_openFile)
-        {
-            delete _ptrInStream;
-            _openFile = false;
-        }
-        _ptrInStream = NULL;
+  }
+
+  // Only _callbackCrit may and should be taken when making callbacks.
+  rtc::CritScope lock(&_callbackCrit);
+  if (_ptrCallback) {
+    if (callbackNotifyMs) {
+      _ptrCallback->RecordNotification(_id, callbackNotifyMs);
     }
-
-    codec_info_.pltype = 0;
-    codec_info_.plname[0] = '\0';
-
-    if(!_playingActive)
-    {
-        LOG(LS_WARNING) << "playing is not active!";
-        return -1;
+    if (recordingEnded) {
+      _ptrCallback->RecordFileEnded(_id);
+      return -1;
     }
-
-    _playingActive = false;
-    return 0;
-}
-
-bool MediaFileImpl::IsPlaying()
-{
-    LOG(LS_VERBOSE) << "MediaFileImpl::IsPlaying()";
-    rtc::CritScope lock(&_crit);
-    return _playingActive;
-}
-
-int32_t MediaFileImpl::IncomingAudioData(
-    const int8_t*  buffer,
-    const size_t bufferLengthInBytes)
-{
-    LOG(LS_INFO) << "MediaFile::IncomingData(buffer= "
-                 << static_cast<const void*>(buffer) << ", bufLen= "
-                 << bufferLengthInBytes << ")";
-
-    if(buffer == NULL || bufferLengthInBytes == 0)
-    {
-        LOG(LS_ERROR) << "Buffer pointer or length is NULL!";
-        return -1;
-    }
-
-    bool recordingEnded = false;
-    uint32_t callbackNotifyMs = 0;
-    {
-        rtc::CritScope lock(&_crit);
-
-        if(!_recordingActive)
-        {
-            LOG(LS_WARNING) << "Not currently recording!";
-            return -1;
-        }
-        if(_ptrOutStream == NULL)
-        {
-            LOG(LS_ERROR) << "Recording is active, but output stream is NULL!";
-            assert(false);
-            return -1;
-        }
-
-        int32_t bytesWritten = 0;
-        uint32_t samplesWritten = codec_info_.pacsize;
-        if(_ptrFileUtilityObj)
-        {
-            switch(_fileFormat)
-            {
-                case kFileFormatPcm8kHzFile:
-                case kFileFormatPcm16kHzFile:
-                case kFileFormatPcm32kHzFile:
-                case kFileFormatPcm48kHzFile:
-                    bytesWritten = _ptrFileUtilityObj->WritePCMData(
-                        *_ptrOutStream,
-                        buffer,
-                        bufferLengthInBytes);
-
-                    // Sample size is 2 bytes.
-                    if(bytesWritten > 0)
-                    {
-                        samplesWritten = bytesWritten/sizeof(int16_t);
-                    }
-                    break;
-                case kFileFormatCompressedFile:
-                    bytesWritten = _ptrFileUtilityObj->WriteCompressedData(
-                        *_ptrOutStream, buffer, bufferLengthInBytes);
-                    break;
-                case kFileFormatWavFile:
-                    bytesWritten = _ptrFileUtilityObj->WriteWavData(
-                        *_ptrOutStream,
-                        buffer,
-                        bufferLengthInBytes);
-                    if(bytesWritten > 0 && STR_NCASE_CMP(codec_info_.plname,
-                                                         "L16", 4) == 0)
-                    {
-                        // Sample size is 2 bytes.
-                        samplesWritten = bytesWritten/sizeof(int16_t);
-                    }
-                    break;
-                case kFileFormatPreencodedFile:
-                    bytesWritten = _ptrFileUtilityObj->WritePreEncodedData(
-                        *_ptrOutStream, buffer, bufferLengthInBytes);
-                    break;
-                default:
-                    LOG(LS_ERROR) << "Invalid file format: " << _fileFormat;
-                    assert(false);
-                    break;
-            }
-        } else {
-            // TODO (hellner): quick look at the code makes me think that this
-            //                 code is never executed. Remove?
-            if(_ptrOutStream)
-            {
-                if(_ptrOutStream->Write(buffer, bufferLengthInBytes))
-                {
-                    bytesWritten = static_cast<int32_t>(bufferLengthInBytes);
-                }
-            }
-        }
-
-        _recordDurationMs += samplesWritten / (codec_info_.plfreq / 1000);
-
-        // Check if it's time for RecordNotification(..).
-        if(_notificationMs)
-        {
-            if(_recordDurationMs  >= _notificationMs)
-            {
-                _notificationMs = 0;
-                callbackNotifyMs = _recordDurationMs;
-            }
-        }
-        if(bytesWritten < (int32_t)bufferLengthInBytes)
-        {
-            LOG(LS_WARNING) << "Failed to write all requested bytes!";
-            StopRecording();
-            recordingEnded = true;
-        }
-    }
-
-    // Only _callbackCrit may and should be taken when making callbacks.
-    rtc::CritScope lock(&_callbackCrit);
-    if(_ptrCallback)
-    {
-        if(callbackNotifyMs)
-        {
-            _ptrCallback->RecordNotification(_id, callbackNotifyMs);
-        }
-        if(recordingEnded)
-        {
-            _ptrCallback->RecordFileEnded(_id);
-            return -1;
-        }
-    }
-    return 0;
+  }
+  return 0;
 }
 
 int32_t MediaFileImpl::StartRecordingAudioFile(
@@ -685,385 +572,319 @@
     const FileFormats format,
     const CodecInst& codecInst,
     const uint32_t notificationTimeMs,
-    const uint32_t maxSizeBytes)
-{
-    if(!ValidFileName(fileName))
-    {
-        return -1;
-    }
-    if(!ValidFileFormat(format,&codecInst))
-    {
-        return -1;
-    }
+    const uint32_t maxSizeBytes) {
+  if (!ValidFileName(fileName)) {
+    return -1;
+  }
+  if (!ValidFileFormat(format, &codecInst)) {
+    return -1;
+  }
 
-    FileWrapper* outputStream = FileWrapper::Create();
-    if(outputStream == NULL)
-    {
-        LOG(LS_INFO) << "Failed to allocate memory for output stream";
-        return -1;
-    }
+  FileWrapper* outputStream = FileWrapper::Create();
+  if (outputStream == NULL) {
+    LOG(LS_INFO) << "Failed to allocate memory for output stream";
+    return -1;
+  }
 
-    if (!outputStream->OpenFile(fileName, false)) {
-      delete outputStream;
-      LOG(LS_ERROR) << "Could not open output file '" << fileName
-                    << "' for writing!";
-      return -1;
-    }
+  if (!outputStream->OpenFile(fileName, false)) {
+    delete outputStream;
+    LOG(LS_ERROR) << "Could not open output file '" << fileName
+                  << "' for writing!";
+    return -1;
+  }
 
-    if(maxSizeBytes)
-    {
-        outputStream->SetMaxFileSize(maxSizeBytes);
-    }
+  if (maxSizeBytes) {
+    outputStream->SetMaxFileSize(maxSizeBytes);
+  }
 
-    if(StartRecordingAudioStream(*outputStream, format, codecInst,
-                                 notificationTimeMs) == -1)
-    {
-        outputStream->CloseFile();
-        delete outputStream;
-        return -1;
-    }
+  if (StartRecordingAudioStream(*outputStream, format, codecInst,
+                                notificationTimeMs) == -1) {
+    outputStream->CloseFile();
+    delete outputStream;
+    return -1;
+  }
 
-    rtc::CritScope lock(&_crit);
-    _openFile = true;
-    strncpy(_fileName, fileName, sizeof(_fileName));
-    _fileName[sizeof(_fileName) - 1] = '\0';
-    return 0;
+  rtc::CritScope lock(&_crit);
+  _openFile = true;
+  strncpy(_fileName, fileName, sizeof(_fileName));
+  _fileName[sizeof(_fileName) - 1] = '\0';
+  return 0;
 }
 
 int32_t MediaFileImpl::StartRecordingAudioStream(
     OutStream& stream,
     const FileFormats format,
     const CodecInst& codecInst,
-    const uint32_t notificationTimeMs)
-{
-    // Check codec info
-    if(!ValidFileFormat(format,&codecInst))
-    {
-        return -1;
-    }
+    const uint32_t notificationTimeMs) {
+  // Check codec info
+  if (!ValidFileFormat(format, &codecInst)) {
+    return -1;
+  }
 
-    rtc::CritScope lock(&_crit);
-    if(_recordingActive || _playingActive)
-    {
-        LOG(LS_ERROR)
-            << "StartRecording called, but already recording or playing file "
-            << _fileName << "!";
-        return -1;
-    }
+  rtc::CritScope lock(&_crit);
+  if (_recordingActive || _playingActive) {
+    LOG(LS_ERROR)
+        << "StartRecording called, but already recording or playing file "
+        << _fileName << "!";
+    return -1;
+  }
 
-    if(_ptrFileUtilityObj != NULL)
-    {
-        LOG(LS_ERROR)
-            << "StartRecording called, but fileUtilityObj already exists!";
-        StopRecording();
-        return -1;
-    }
+  if (_ptrFileUtilityObj != NULL) {
+    LOG(LS_ERROR)
+        << "StartRecording called, but fileUtilityObj already exists!";
+    StopRecording();
+    return -1;
+  }
 
-    _ptrFileUtilityObj = new ModuleFileUtility();
-    if(_ptrFileUtilityObj == NULL)
-    {
-        LOG(LS_INFO) << "Cannot allocate fileUtilityObj!";
-        return -1;
-    }
+  _ptrFileUtilityObj = new ModuleFileUtility();
+  if (_ptrFileUtilityObj == NULL) {
+    LOG(LS_INFO) << "Cannot allocate fileUtilityObj!";
+    return -1;
+  }
 
-    CodecInst tmpAudioCodec;
-    memcpy(&tmpAudioCodec, &codecInst, sizeof(CodecInst));
-    switch(format)
-    {
-        case kFileFormatWavFile:
-        {
-            if(_ptrFileUtilityObj->InitWavWriting(stream, codecInst) == -1)
-            {
-                LOG(LS_ERROR) << "Failed to initialize WAV file!";
-                delete _ptrFileUtilityObj;
-                _ptrFileUtilityObj = NULL;
-                return -1;
-            }
-            _fileFormat = kFileFormatWavFile;
-            break;
-        }
-        case kFileFormatCompressedFile:
-        {
-            // Write compression codec name at beginning of file
-            if(_ptrFileUtilityObj->InitCompressedWriting(stream, codecInst) ==
-               -1)
-            {
-                LOG(LS_ERROR) << "Failed to initialize Compressed file!";
-                delete _ptrFileUtilityObj;
-                _ptrFileUtilityObj = NULL;
-                return -1;
-            }
-            _fileFormat = kFileFormatCompressedFile;
-            break;
-        }
-        case kFileFormatPcm8kHzFile:
-        case kFileFormatPcm16kHzFile:
-        case kFileFormatPcm32kHzFile:
-        case kFileFormatPcm48kHzFile:
-        {
-            if(!ValidFrequency(codecInst.plfreq) ||
-               _ptrFileUtilityObj->InitPCMWriting(stream, codecInst.plfreq) ==
-               -1)
-            {
-                LOG(LS_ERROR) << "Failed to initialize PCM file!";
-                delete _ptrFileUtilityObj;
-                _ptrFileUtilityObj = NULL;
-                return -1;
-            }
-            _fileFormat = format;
-            break;
-        }
-        case kFileFormatPreencodedFile:
-        {
-            if(_ptrFileUtilityObj->InitPreEncodedWriting(stream, codecInst) ==
-               -1)
-            {
-                LOG(LS_ERROR) << "Failed to initialize Pre-Encoded file!";
-                delete _ptrFileUtilityObj;
-                _ptrFileUtilityObj = NULL;
-                return -1;
-            }
-
-            _fileFormat = kFileFormatPreencodedFile;
-            break;
-        }
-        default:
-        {
-            LOG(LS_ERROR) << "Invalid file format " << format << " specified!";
-            delete _ptrFileUtilityObj;
-            _ptrFileUtilityObj = NULL;
-            return -1;
-        }
-    }
-    _isStereo = (tmpAudioCodec.channels == 2);
-    if(_isStereo)
-    {
-        if(_fileFormat != kFileFormatWavFile)
-        {
-            LOG(LS_WARNING) << "Stereo is only allowed for WAV files";
-            StopRecording();
-            return -1;
-        }
-        if((STR_NCASE_CMP(tmpAudioCodec.plname, "L16", 4) != 0) &&
-           (STR_NCASE_CMP(tmpAudioCodec.plname, "PCMU", 5) != 0) &&
-           (STR_NCASE_CMP(tmpAudioCodec.plname, "PCMA", 5) != 0))
-        {
-            LOG(LS_WARNING)
-                << "Stereo is only allowed for codec PCMU, PCMA and L16 ";
-            StopRecording();
-            return -1;
-        }
-    }
-    memcpy(&codec_info_, &tmpAudioCodec, sizeof(CodecInst));
-    _recordingActive = true;
-    _ptrOutStream = &stream;
-    _notificationMs = notificationTimeMs;
-    _recordDurationMs = 0;
-    return 0;
-}
-
-int32_t MediaFileImpl::StopRecording()
-{
-
-    rtc::CritScope lock(&_crit);
-    if(!_recordingActive)
-    {
-        LOG(LS_WARNING) << "recording is not active!";
-        return -1;
-    }
-
-    _isStereo = false;
-
-    if(_ptrFileUtilityObj != NULL)
-    {
-        // Both AVI and WAV header has to be updated before closing the stream
-        // because they contain size information.
-        if((_fileFormat == kFileFormatWavFile) &&
-            (_ptrOutStream != NULL))
-        {
-            _ptrFileUtilityObj->UpdateWavHeader(*_ptrOutStream);
-        }
+  CodecInst tmpAudioCodec;
+  memcpy(&tmpAudioCodec, &codecInst, sizeof(CodecInst));
+  switch (format) {
+    case kFileFormatWavFile: {
+      if (_ptrFileUtilityObj->InitWavWriting(stream, codecInst) == -1) {
+        LOG(LS_ERROR) << "Failed to initialize WAV file!";
         delete _ptrFileUtilityObj;
         _ptrFileUtilityObj = NULL;
-    }
-
-    if(_ptrOutStream != NULL)
-    {
-        // If MediaFileImpl opened the OutStream it must be reclaimed here.
-        if(_openFile)
-        {
-            delete _ptrOutStream;
-            _openFile = false;
-        }
-        _ptrOutStream = NULL;
-    }
-
-    _recordingActive = false;
-    codec_info_.pltype = 0;
-    codec_info_.plname[0] = '\0';
-
-    return 0;
-}
-
-bool MediaFileImpl::IsRecording()
-{
-    LOG(LS_VERBOSE) << "MediaFileImpl::IsRecording()";
-    rtc::CritScope lock(&_crit);
-    return _recordingActive;
-}
-
-int32_t MediaFileImpl::RecordDurationMs(uint32_t& durationMs)
-{
-
-    rtc::CritScope lock(&_crit);
-    if(!_recordingActive)
-    {
-        durationMs = 0;
         return -1;
+      }
+      _fileFormat = kFileFormatWavFile;
+      break;
     }
-    durationMs = _recordDurationMs;
-    return 0;
+    case kFileFormatCompressedFile: {
+      // Write compression codec name at beginning of file
+      if (_ptrFileUtilityObj->InitCompressedWriting(stream, codecInst) == -1) {
+        LOG(LS_ERROR) << "Failed to initialize Compressed file!";
+        delete _ptrFileUtilityObj;
+        _ptrFileUtilityObj = NULL;
+        return -1;
+      }
+      _fileFormat = kFileFormatCompressedFile;
+      break;
+    }
+    case kFileFormatPcm8kHzFile:
+    case kFileFormatPcm16kHzFile:
+    case kFileFormatPcm32kHzFile:
+    case kFileFormatPcm48kHzFile: {
+      if (!ValidFrequency(codecInst.plfreq) ||
+          _ptrFileUtilityObj->InitPCMWriting(stream, codecInst.plfreq) == -1) {
+        LOG(LS_ERROR) << "Failed to initialize PCM file!";
+        delete _ptrFileUtilityObj;
+        _ptrFileUtilityObj = NULL;
+        return -1;
+      }
+      _fileFormat = format;
+      break;
+    }
+    case kFileFormatPreencodedFile: {
+      if (_ptrFileUtilityObj->InitPreEncodedWriting(stream, codecInst) == -1) {
+        LOG(LS_ERROR) << "Failed to initialize Pre-Encoded file!";
+        delete _ptrFileUtilityObj;
+        _ptrFileUtilityObj = NULL;
+        return -1;
+      }
+
+      _fileFormat = kFileFormatPreencodedFile;
+      break;
+    }
+    default: {
+      LOG(LS_ERROR) << "Invalid file format " << format << " specified!";
+      delete _ptrFileUtilityObj;
+      _ptrFileUtilityObj = NULL;
+      return -1;
+    }
+  }
+  _isStereo = (tmpAudioCodec.channels == 2);
+  if (_isStereo) {
+    if (_fileFormat != kFileFormatWavFile) {
+      LOG(LS_WARNING) << "Stereo is only allowed for WAV files";
+      StopRecording();
+      return -1;
+    }
+    if ((STR_NCASE_CMP(tmpAudioCodec.plname, "L16", 4) != 0) &&
+        (STR_NCASE_CMP(tmpAudioCodec.plname, "PCMU", 5) != 0) &&
+        (STR_NCASE_CMP(tmpAudioCodec.plname, "PCMA", 5) != 0)) {
+      LOG(LS_WARNING) << "Stereo is only allowed for codec PCMU, PCMA and L16 ";
+      StopRecording();
+      return -1;
+    }
+  }
+  memcpy(&codec_info_, &tmpAudioCodec, sizeof(CodecInst));
+  _recordingActive = true;
+  _ptrOutStream = &stream;
+  _notificationMs = notificationTimeMs;
+  _recordDurationMs = 0;
+  return 0;
 }
 
-bool MediaFileImpl::IsStereo()
-{
-    LOG(LS_VERBOSE) << "MediaFileImpl::IsStereo()";
-    rtc::CritScope lock(&_crit);
-    return _isStereo;
+int32_t MediaFileImpl::StopRecording() {
+  rtc::CritScope lock(&_crit);
+  if (!_recordingActive) {
+    LOG(LS_WARNING) << "recording is not active!";
+    return -1;
+  }
+
+  _isStereo = false;
+
+  if (_ptrFileUtilityObj != NULL) {
+    // Both AVI and WAV header has to be updated before closing the stream
+    // because they contain size information.
+    if ((_fileFormat == kFileFormatWavFile) && (_ptrOutStream != NULL)) {
+      _ptrFileUtilityObj->UpdateWavHeader(*_ptrOutStream);
+    }
+    delete _ptrFileUtilityObj;
+    _ptrFileUtilityObj = NULL;
+  }
+
+  if (_ptrOutStream != NULL) {
+    // If MediaFileImpl opened the OutStream it must be reclaimed here.
+    if (_openFile) {
+      delete _ptrOutStream;
+      _openFile = false;
+    }
+    _ptrOutStream = NULL;
+  }
+
+  _recordingActive = false;
+  codec_info_.pltype = 0;
+  codec_info_.plname[0] = '\0';
+
+  return 0;
 }
 
-int32_t MediaFileImpl::SetModuleFileCallback(FileCallback* callback)
-{
+bool MediaFileImpl::IsRecording() {
+  LOG(LS_VERBOSE) << "MediaFileImpl::IsRecording()";
+  rtc::CritScope lock(&_crit);
+  return _recordingActive;
+}
 
-    rtc::CritScope lock(&_callbackCrit);
+int32_t MediaFileImpl::RecordDurationMs(uint32_t& durationMs) {
+  rtc::CritScope lock(&_crit);
+  if (!_recordingActive) {
+    durationMs = 0;
+    return -1;
+  }
+  durationMs = _recordDurationMs;
+  return 0;
+}
 
-    _ptrCallback = callback;
-    return 0;
+bool MediaFileImpl::IsStereo() {
+  LOG(LS_VERBOSE) << "MediaFileImpl::IsStereo()";
+  rtc::CritScope lock(&_crit);
+  return _isStereo;
+}
+
+int32_t MediaFileImpl::SetModuleFileCallback(FileCallback* callback) {
+  rtc::CritScope lock(&_callbackCrit);
+
+  _ptrCallback = callback;
+  return 0;
 }
 
 int32_t MediaFileImpl::FileDurationMs(const char* fileName,
                                       uint32_t& durationMs,
                                       const FileFormats format,
-                                      const uint32_t freqInHz)
-{
+                                      const uint32_t freqInHz) {
+  if (!ValidFileName(fileName)) {
+    return -1;
+  }
+  if (!ValidFrequency(freqInHz)) {
+    return -1;
+  }
 
-    if(!ValidFileName(fileName))
-    {
-        return -1;
-    }
-    if(!ValidFrequency(freqInHz))
-    {
-        return -1;
-    }
+  ModuleFileUtility* utilityObj = new ModuleFileUtility();
+  if (utilityObj == NULL) {
+    LOG(LS_ERROR) << "failed to allocate utility object!";
+    return -1;
+  }
 
-    ModuleFileUtility* utilityObj = new ModuleFileUtility();
-    if(utilityObj == NULL)
-    {
-        LOG(LS_ERROR) << "failed to allocate utility object!";
-        return -1;
-    }
+  const int32_t duration =
+      utilityObj->FileDurationMs(fileName, format, freqInHz);
+  delete utilityObj;
+  if (duration == -1) {
+    durationMs = 0;
+    return -1;
+  }
 
-    const int32_t duration = utilityObj->FileDurationMs(fileName, format,
-                                                        freqInHz);
-    delete utilityObj;
-    if(duration == -1)
-    {
-        durationMs = 0;
-        return -1;
-    }
-
-    durationMs = duration;
-    return 0;
+  durationMs = duration;
+  return 0;
 }
 
-int32_t MediaFileImpl::PlayoutPositionMs(uint32_t& positionMs) const
-{
-    rtc::CritScope lock(&_crit);
-    if(!_playingActive)
-    {
-        positionMs = 0;
-        return -1;
-    }
-    positionMs = _playoutPositionMs;
-    return 0;
+int32_t MediaFileImpl::PlayoutPositionMs(uint32_t& positionMs) const {
+  rtc::CritScope lock(&_crit);
+  if (!_playingActive) {
+    positionMs = 0;
+    return -1;
+  }
+  positionMs = _playoutPositionMs;
+  return 0;
 }
 
-int32_t MediaFileImpl::codec_info(CodecInst& codecInst) const
-{
-    rtc::CritScope lock(&_crit);
-    if(!_playingActive && !_recordingActive)
-    {
-        LOG(LS_ERROR) << "Neither playout nor recording has been initialized!";
-        return -1;
-    }
-    if (codec_info_.pltype == 0 && codec_info_.plname[0] == '\0')
-    {
-        LOG(LS_ERROR) << "The CodecInst for "
-                      << (_playingActive ? "Playback" : "Recording")
-                      << " is unknown!";
-        return -1;
-    }
-    memcpy(&codecInst,&codec_info_,sizeof(CodecInst));
-    return 0;
+int32_t MediaFileImpl::codec_info(CodecInst& codecInst) const {
+  rtc::CritScope lock(&_crit);
+  if (!_playingActive && !_recordingActive) {
+    LOG(LS_ERROR) << "Neither playout nor recording has been initialized!";
+    return -1;
+  }
+  if (codec_info_.pltype == 0 && codec_info_.plname[0] == '\0') {
+    LOG(LS_ERROR) << "The CodecInst for "
+                  << (_playingActive ? "Playback" : "Recording")
+                  << " is unknown!";
+    return -1;
+  }
+  memcpy(&codecInst, &codec_info_, sizeof(CodecInst));
+  return 0;
 }
 
 bool MediaFileImpl::ValidFileFormat(const FileFormats format,
-                                    const CodecInst*  codecInst)
-{
-    if(codecInst == NULL)
-    {
-        if(format == kFileFormatPreencodedFile ||
-           format == kFileFormatPcm8kHzFile    ||
-           format == kFileFormatPcm16kHzFile   ||
-           format == kFileFormatPcm32kHzFile   ||
-           format == kFileFormatPcm48kHzFile)
-        {
-            LOG(LS_ERROR) << "Codec info required for file format specified!";
-            return false;
-        }
+                                    const CodecInst* codecInst) {
+  if (codecInst == NULL) {
+    if (format == kFileFormatPreencodedFile ||
+        format == kFileFormatPcm8kHzFile || format == kFileFormatPcm16kHzFile ||
+        format == kFileFormatPcm32kHzFile ||
+        format == kFileFormatPcm48kHzFile) {
+      LOG(LS_ERROR) << "Codec info required for file format specified!";
+      return false;
     }
-    return true;
+  }
+  return true;
 }
 
-bool MediaFileImpl::ValidFileName(const char* fileName)
-{
-    if((fileName == NULL) ||(fileName[0] == '\0'))
-    {
-        LOG(LS_ERROR) << "FileName not specified!";
-        return false;
-    }
-    return true;
+bool MediaFileImpl::ValidFileName(const char* fileName) {
+  if ((fileName == NULL) || (fileName[0] == '\0')) {
+    LOG(LS_ERROR) << "FileName not specified!";
+    return false;
+  }
+  return true;
 }
 
-
 bool MediaFileImpl::ValidFilePositions(const uint32_t startPointMs,
-                                       const uint32_t stopPointMs)
-{
-    if(startPointMs == 0 && stopPointMs == 0) // Default values
-    {
-        return true;
-    }
-    if(stopPointMs &&(startPointMs >= stopPointMs))
-    {
-        LOG(LS_ERROR) << "startPointMs must be less than stopPointMs!";
-        return false;
-    }
-    if(stopPointMs &&((stopPointMs - startPointMs) < 20))
-    {
-        LOG(LS_ERROR) << "minimum play duration for files is 20 ms!";
-        return false;
-    }
+                                       const uint32_t stopPointMs) {
+  if (startPointMs == 0 && stopPointMs == 0)  // Default values
+  {
     return true;
+  }
+  if (stopPointMs && (startPointMs >= stopPointMs)) {
+    LOG(LS_ERROR) << "startPointMs must be less than stopPointMs!";
+    return false;
+  }
+  if (stopPointMs && ((stopPointMs - startPointMs) < 20)) {
+    LOG(LS_ERROR) << "minimum play duration for files is 20 ms!";
+    return false;
+  }
+  return true;
 }
 
-bool MediaFileImpl::ValidFrequency(const uint32_t frequency)
-{
-    if((frequency == 8000) || (frequency == 16000)|| (frequency == 32000) ||
-       (frequency == 48000))
-    {
-        return true;
-    }
-    LOG(LS_ERROR) << "Frequency should be 8000, 16000, 32000, or 48000 (Hz)";
-    return false;
+bool MediaFileImpl::ValidFrequency(const uint32_t frequency) {
+  if ((frequency == 8000) || (frequency == 16000) || (frequency == 32000) ||
+      (frequency == 48000)) {
+    return true;
+  }
+  LOG(LS_ERROR) << "Frequency should be 8000, 16000, 32000, or 48000 (Hz)";
+  return false;
 }
 }  // namespace webrtc
diff --git a/modules/media_file/media_file_utility.cc b/modules/media_file/media_file_utility.cc
index b21509f..d8ba066 100644
--- a/modules/media_file/media_file_utility.cc
+++ b/modules/media_file/media_file_utility.cc
@@ -27,19 +27,17 @@
 
 // First 16 bytes the WAVE header. ckID should be "RIFF", wave_ckID should be
 // "WAVE" and ckSize is the chunk size (4 + n)
-struct WAVE_RIFF_header
-{
-    int8_t  ckID[4];
-    int32_t ckSize;
-    int8_t  wave_ckID[4];
+struct WAVE_RIFF_header {
+  int8_t ckID[4];
+  int32_t ckSize;
+  int8_t wave_ckID[4];
 };
 
 // First 8 byte of the format chunk. fmt_ckID should be "fmt ". fmt_ckSize is
 // the chunk size (16, 18 or 40 byte)
-struct WAVE_CHUNK_header
-{
-   int8_t   fmt_ckID[4];
-   uint32_t fmt_ckSize;
+struct WAVE_CHUNK_header {
+  int8_t fmt_ckID[4];
+  uint32_t fmt_ckSize;
 };
 }  // unnamed namespace
 
@@ -59,1463 +57,1178 @@
       _reading(false),
       _writing(false),
       _tempData() {
-    LOG(LS_INFO) << "ModuleFileUtility::ModuleFileUtility()";
-    memset(&codec_info_,0,sizeof(CodecInst));
-    codec_info_.pltype = -1;
+  LOG(LS_INFO) << "ModuleFileUtility::ModuleFileUtility()";
+  memset(&codec_info_, 0, sizeof(CodecInst));
+  codec_info_.pltype = -1;
 }
 
-ModuleFileUtility::~ModuleFileUtility()
-{
-    LOG(LS_INFO) << "ModuleFileUtility::~ModuleFileUtility()";
+ModuleFileUtility::~ModuleFileUtility() {
+  LOG(LS_INFO) << "ModuleFileUtility::~ModuleFileUtility()";
 }
 
-int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
-{
-    WAVE_RIFF_header RIFFheaderObj;
-    WAVE_CHUNK_header CHUNKheaderObj;
-    // TODO (hellner): tmpStr and tmpStr2 seems unnecessary here.
-    char tmpStr[6] = "FOUR";
-    unsigned char tmpStr2[4];
-    size_t i;
-    bool dataFound = false;
-    bool fmtFound = false;
-    int8_t dummyRead;
+int32_t ModuleFileUtility::ReadWavHeader(InStream& wav) {
+  WAVE_RIFF_header RIFFheaderObj;
+  WAVE_CHUNK_header CHUNKheaderObj;
+  // TODO (hellner): tmpStr and tmpStr2 seems unnecessary here.
+  char tmpStr[6] = "FOUR";
+  unsigned char tmpStr2[4];
+  size_t i;
+  bool dataFound = false;
+  bool fmtFound = false;
+  int8_t dummyRead;
 
+  _dataSize = 0;
+  int len = wav.Read(&RIFFheaderObj, sizeof(WAVE_RIFF_header));
+  if (len != static_cast<int>(sizeof(WAVE_RIFF_header))) {
+    LOG(LS_ERROR) << "Not a wave file (too short)";
+    return -1;
+  }
 
-    _dataSize = 0;
-    int len = wav.Read(&RIFFheaderObj, sizeof(WAVE_RIFF_header));
-    if (len != static_cast<int>(sizeof(WAVE_RIFF_header)))
-    {
-        LOG(LS_ERROR) << "Not a wave file (too short)";
-        return -1;
-    }
+  for (i = 0; i < 4; i++) {
+    tmpStr[i] = RIFFheaderObj.ckID[i];
+  }
+  if (strcmp(tmpStr, "RIFF") != 0) {
+    LOG(LS_ERROR) << "Not a wave file (does not have RIFF)";
+    return -1;
+  }
+  for (i = 0; i < 4; i++) {
+    tmpStr[i] = RIFFheaderObj.wave_ckID[i];
+  }
+  if (strcmp(tmpStr, "WAVE") != 0) {
+    LOG(LS_ERROR) << "Not a wave file (does not have WAVE)";
+    return -1;
+  }
 
-    for (i = 0; i < 4; i++)
-    {
-        tmpStr[i] = RIFFheaderObj.ckID[i];
-    }
-    if(strcmp(tmpStr, "RIFF") != 0)
-    {
-        LOG(LS_ERROR) << "Not a wave file (does not have RIFF)";
+  len = wav.Read(&CHUNKheaderObj, sizeof(WAVE_CHUNK_header));
+
+  // WAVE files are stored in little endian byte order. Make sure that the
+  // data can be read on big endian as well.
+  // TODO (hellner): little endian to system byte order should be done in
+  //                 in a subroutine.
+  memcpy(tmpStr2, &CHUNKheaderObj.fmt_ckSize, 4);
+  CHUNKheaderObj.fmt_ckSize =
+      (uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8) +
+      (((uint32_t)tmpStr2[2]) << 16) + (((uint32_t)tmpStr2[3]) << 24);
+
+  memcpy(tmpStr, CHUNKheaderObj.fmt_ckID, 4);
+
+  while ((len == static_cast<int>(sizeof(WAVE_CHUNK_header))) &&
+         (!fmtFound || !dataFound)) {
+    if (strcmp(tmpStr, "fmt ") == 0) {
+      len = wav.Read(&_wavFormatObj, sizeof(WAVE_FMTINFO_header));
+
+      memcpy(tmpStr2, &_wavFormatObj.formatTag, 2);
+      _wavFormatObj.formatTag =
+          (uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8);
+      memcpy(tmpStr2, &_wavFormatObj.nChannels, 2);
+      _wavFormatObj.nChannels =
+          (int16_t)((uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8));
+      memcpy(tmpStr2, &_wavFormatObj.nSamplesPerSec, 4);
+      _wavFormatObj.nSamplesPerSec = (int32_t)(
+          (uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8) +
+          (((uint32_t)tmpStr2[2]) << 16) + (((uint32_t)tmpStr2[3]) << 24));
+      memcpy(tmpStr2, &_wavFormatObj.nAvgBytesPerSec, 4);
+      _wavFormatObj.nAvgBytesPerSec = (int32_t)(
+          (uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8) +
+          (((uint32_t)tmpStr2[2]) << 16) + (((uint32_t)tmpStr2[3]) << 24));
+      memcpy(tmpStr2, &_wavFormatObj.nBlockAlign, 2);
+      _wavFormatObj.nBlockAlign =
+          (int16_t)((uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8));
+      memcpy(tmpStr2, &_wavFormatObj.nBitsPerSample, 2);
+      _wavFormatObj.nBitsPerSample =
+          (int16_t)((uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8));
+
+      if (CHUNKheaderObj.fmt_ckSize < sizeof(WAVE_FMTINFO_header)) {
+        LOG(LS_ERROR) << "Chunk size is too small";
         return -1;
-    }
-    for (i = 0; i < 4; i++)
-    {
-        tmpStr[i] = RIFFheaderObj.wave_ckID[i];
-    }
-    if(strcmp(tmpStr, "WAVE") != 0)
-    {
-        LOG(LS_ERROR) << "Not a wave file (does not have WAVE)";
-        return -1;
+      }
+      for (i = 0; i < CHUNKheaderObj.fmt_ckSize - sizeof(WAVE_FMTINFO_header);
+           i++) {
+        len = wav.Read(&dummyRead, 1);
+        if (len != 1) {
+          LOG(LS_ERROR) << "File corrupted, reached EOF (reading fmt)";
+          return -1;
+        }
+      }
+      fmtFound = true;
+    } else if (strcmp(tmpStr, "data") == 0) {
+      _dataSize = CHUNKheaderObj.fmt_ckSize;
+      dataFound = true;
+      break;
+    } else {
+      for (i = 0; i < CHUNKheaderObj.fmt_ckSize; i++) {
+        len = wav.Read(&dummyRead, 1);
+        if (len != 1) {
+          LOG(LS_ERROR) << "File corrupted, reached EOF (reading other)";
+          return -1;
+        }
+      }
     }
 
     len = wav.Read(&CHUNKheaderObj, sizeof(WAVE_CHUNK_header));
 
-    // WAVE files are stored in little endian byte order. Make sure that the
-    // data can be read on big endian as well.
-    // TODO (hellner): little endian to system byte order should be done in
-    //                 in a subroutine.
     memcpy(tmpStr2, &CHUNKheaderObj.fmt_ckSize, 4);
     CHUNKheaderObj.fmt_ckSize =
         (uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8) +
         (((uint32_t)tmpStr2[2]) << 16) + (((uint32_t)tmpStr2[3]) << 24);
 
     memcpy(tmpStr, CHUNKheaderObj.fmt_ckID, 4);
+  }
 
-    while ((len == static_cast<int>(sizeof(WAVE_CHUNK_header))) &&
-           (!fmtFound || !dataFound))
-    {
-        if(strcmp(tmpStr, "fmt ") == 0)
-        {
-            len = wav.Read(&_wavFormatObj, sizeof(WAVE_FMTINFO_header));
+  // Either a proper format chunk has been read or a data chunk was come
+  // across.
+  if ((_wavFormatObj.formatTag != kWavFormatPcm) &&
+      (_wavFormatObj.formatTag != kWavFormatALaw) &&
+      (_wavFormatObj.formatTag != kWavFormatMuLaw)) {
+    LOG(LS_ERROR) << "Coding formatTag value=" << _wavFormatObj.formatTag
+                  << " not supported!";
+    return -1;
+  }
+  if ((_wavFormatObj.nChannels < 1) || (_wavFormatObj.nChannels > 2)) {
+    LOG(LS_ERROR) << "nChannels value=" << _wavFormatObj.nChannels
+                  << " not supported!";
+    return -1;
+  }
 
-            memcpy(tmpStr2, &_wavFormatObj.formatTag, 2);
-            _wavFormatObj.formatTag =
-                (uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1])<<8);
-            memcpy(tmpStr2, &_wavFormatObj.nChannels, 2);
-            _wavFormatObj.nChannels =
-                (int16_t) ((uint32_t)tmpStr2[0] +
-                                 (((uint32_t)tmpStr2[1])<<8));
-            memcpy(tmpStr2, &_wavFormatObj.nSamplesPerSec, 4);
-            _wavFormatObj.nSamplesPerSec =
-                (int32_t) ((uint32_t)tmpStr2[0] +
-                                 (((uint32_t)tmpStr2[1])<<8) +
-                                 (((uint32_t)tmpStr2[2])<<16) +
-                                 (((uint32_t)tmpStr2[3])<<24));
-            memcpy(tmpStr2, &_wavFormatObj.nAvgBytesPerSec, 4);
-            _wavFormatObj.nAvgBytesPerSec =
-                (int32_t) ((uint32_t)tmpStr2[0] +
-                                 (((uint32_t)tmpStr2[1])<<8) +
-                                 (((uint32_t)tmpStr2[2])<<16) +
-                                 (((uint32_t)tmpStr2[3])<<24));
-            memcpy(tmpStr2, &_wavFormatObj.nBlockAlign, 2);
-            _wavFormatObj.nBlockAlign =
-                (int16_t) ((uint32_t)tmpStr2[0] +
-                                 (((uint32_t)tmpStr2[1])<<8));
-            memcpy(tmpStr2, &_wavFormatObj.nBitsPerSample, 2);
-            _wavFormatObj.nBitsPerSample =
-                (int16_t) ((uint32_t)tmpStr2[0] +
-                                 (((uint32_t)tmpStr2[1])<<8));
+  if ((_wavFormatObj.nBitsPerSample != 8) &&
+      (_wavFormatObj.nBitsPerSample != 16)) {
+    LOG(LS_ERROR) << "nBitsPerSample value=" << _wavFormatObj.nBitsPerSample
+                  << " not supported!";
+    return -1;
+  }
 
-            if (CHUNKheaderObj.fmt_ckSize < sizeof(WAVE_FMTINFO_header))
-            {
-                LOG(LS_ERROR) << "Chunk size is too small";
-                return -1;
-            }
-            for (i = 0;
-                 i < CHUNKheaderObj.fmt_ckSize - sizeof(WAVE_FMTINFO_header);
-                 i++)
-            {
-                len = wav.Read(&dummyRead, 1);
-                if(len != 1)
-                {
-                    LOG(LS_ERROR)
-                        << "File corrupted, reached EOF (reading fmt)";
-                    return -1;
-                }
-            }
-            fmtFound = true;
-        }
-        else if(strcmp(tmpStr, "data") == 0)
-        {
-            _dataSize = CHUNKheaderObj.fmt_ckSize;
-            dataFound = true;
-            break;
-        }
-        else
-        {
-            for (i = 0; i < CHUNKheaderObj.fmt_ckSize; i++)
-            {
-                len = wav.Read(&dummyRead, 1);
-                if(len != 1)
-                {
-                    LOG(LS_ERROR)
-                        << "File corrupted, reached EOF (reading other)";
-                    return -1;
-                }
-            }
-        }
-
-        len = wav.Read(&CHUNKheaderObj, sizeof(WAVE_CHUNK_header));
-
-        memcpy(tmpStr2, &CHUNKheaderObj.fmt_ckSize, 4);
-        CHUNKheaderObj.fmt_ckSize =
-            (uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8) +
-            (((uint32_t)tmpStr2[2]) << 16) + (((uint32_t)tmpStr2[3]) << 24);
-
-        memcpy(tmpStr, CHUNKheaderObj.fmt_ckID, 4);
-    }
-
-    // Either a proper format chunk has been read or a data chunk was come
-    // across.
-    if( (_wavFormatObj.formatTag != kWavFormatPcm) &&
-        (_wavFormatObj.formatTag != kWavFormatALaw) &&
-        (_wavFormatObj.formatTag != kWavFormatMuLaw))
-    {
-        LOG(LS_ERROR) << "Coding formatTag value=" << _wavFormatObj.formatTag
-                      << " not supported!";
-        return -1;
-    }
-    if((_wavFormatObj.nChannels < 1) ||
-        (_wavFormatObj.nChannels > 2))
-    {
-        LOG(LS_ERROR) << "nChannels value=" << _wavFormatObj.nChannels
-                      << " not supported!";
-        return -1;
-    }
-
-    if((_wavFormatObj.nBitsPerSample != 8) &&
-        (_wavFormatObj.nBitsPerSample != 16))
-    {
-        LOG(LS_ERROR) << "nBitsPerSample value=" << _wavFormatObj.nBitsPerSample
-                      << " not supported!";
-        return -1;
-    }
-
-    // Calculate the number of bytes that 10 ms of audio data correspond to.
-    size_t samples_per_10ms =
-        ((_wavFormatObj.formatTag == kWavFormatPcm) &&
-         (_wavFormatObj.nSamplesPerSec == 44100)) ?
-        440 : static_cast<size_t>(_wavFormatObj.nSamplesPerSec / 100);
-    _readSizeBytes = samples_per_10ms * _wavFormatObj.nChannels *
-        (_wavFormatObj.nBitsPerSample / 8);
-    return 0;
+  // Calculate the number of bytes that 10 ms of audio data correspond to.
+  size_t samples_per_10ms =
+      ((_wavFormatObj.formatTag == kWavFormatPcm) &&
+       (_wavFormatObj.nSamplesPerSec == 44100))
+          ? 440
+          : static_cast<size_t>(_wavFormatObj.nSamplesPerSec / 100);
+  _readSizeBytes = samples_per_10ms * _wavFormatObj.nChannels *
+                   (_wavFormatObj.nBitsPerSample / 8);
+  return 0;
 }
 
 int32_t ModuleFileUtility::InitWavCodec(uint32_t samplesPerSec,
                                         size_t channels,
                                         uint32_t bitsPerSample,
-                                        uint32_t formatTag)
-{
-    codec_info_.pltype   = -1;
-    codec_info_.plfreq   = samplesPerSec;
-    codec_info_.channels = channels;
-    codec_info_.rate     = bitsPerSample * samplesPerSec;
+                                        uint32_t formatTag) {
+  codec_info_.pltype = -1;
+  codec_info_.plfreq = samplesPerSec;
+  codec_info_.channels = channels;
+  codec_info_.rate = bitsPerSample * samplesPerSec;
 
-    // Calculate the packet size for 10ms frames
-    switch(formatTag)
-    {
+  // Calculate the packet size for 10ms frames
+  switch (formatTag) {
     case kWavFormatALaw:
-        strcpy(codec_info_.plname, "PCMA");
-        _codecId = kCodecPcma;
-        codec_info_.pltype = 8;
-        codec_info_.pacsize  = codec_info_.plfreq / 100;
-        break;
+      strcpy(codec_info_.plname, "PCMA");
+      _codecId = kCodecPcma;
+      codec_info_.pltype = 8;
+      codec_info_.pacsize = codec_info_.plfreq / 100;
+      break;
     case kWavFormatMuLaw:
-        strcpy(codec_info_.plname, "PCMU");
-        _codecId = kCodecPcmu;
-        codec_info_.pltype = 0;
-        codec_info_.pacsize  = codec_info_.plfreq / 100;
-         break;
+      strcpy(codec_info_.plname, "PCMU");
+      _codecId = kCodecPcmu;
+      codec_info_.pltype = 0;
+      codec_info_.pacsize = codec_info_.plfreq / 100;
+      break;
     case kWavFormatPcm:
-        codec_info_.pacsize  = (bitsPerSample * (codec_info_.plfreq / 100)) / 8;
-        if(samplesPerSec == 8000)
-        {
-            strcpy(codec_info_.plname, "L16");
-            _codecId = kCodecL16_8Khz;
-        }
-        else if(samplesPerSec == 16000)
-        {
-            strcpy(codec_info_.plname, "L16");
-            _codecId = kCodecL16_16kHz;
-        }
-        else if(samplesPerSec == 32000)
-        {
-            strcpy(codec_info_.plname, "L16");
-            _codecId = kCodecL16_32Khz;
-        }
-        // Set the packet size for "odd" sampling frequencies so that it
-        // properly corresponds to _readSizeBytes.
-        else if(samplesPerSec == 11025)
-        {
-            strcpy(codec_info_.plname, "L16");
-            _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 110;
-            codec_info_.plfreq = 11000;
-        }
-        else if(samplesPerSec == 22050)
-        {
-            strcpy(codec_info_.plname, "L16");
-            _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 220;
-            codec_info_.plfreq = 22000;
-        }
-        else if(samplesPerSec == 44100)
-        {
-            strcpy(codec_info_.plname, "L16");
-            _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 440;
-            codec_info_.plfreq = 44000;
-        }
-        else if(samplesPerSec == 48000)
-        {
-            strcpy(codec_info_.plname, "L16");
-            _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 480;
-            codec_info_.plfreq = 48000;
-        }
-        else
-        {
-            LOG(LS_ERROR) << "Unsupported PCM frequency!";
-            return -1;
-        }
-        break;
-        default:
-            LOG(LS_ERROR) << "unknown WAV format TAG!";
-            return -1;
-            break;
-    }
-    return 0;
+      codec_info_.pacsize = (bitsPerSample * (codec_info_.plfreq / 100)) / 8;
+      if (samplesPerSec == 8000) {
+        strcpy(codec_info_.plname, "L16");
+        _codecId = kCodecL16_8Khz;
+      } else if (samplesPerSec == 16000) {
+        strcpy(codec_info_.plname, "L16");
+        _codecId = kCodecL16_16kHz;
+      } else if (samplesPerSec == 32000) {
+        strcpy(codec_info_.plname, "L16");
+        _codecId = kCodecL16_32Khz;
+      }
+      // Set the packet size for "odd" sampling frequencies so that it
+      // properly corresponds to _readSizeBytes.
+      else if (samplesPerSec == 11025) {
+        strcpy(codec_info_.plname, "L16");
+        _codecId = kCodecL16_16kHz;
+        codec_info_.pacsize = 110;
+        codec_info_.plfreq = 11000;
+      } else if (samplesPerSec == 22050) {
+        strcpy(codec_info_.plname, "L16");
+        _codecId = kCodecL16_16kHz;
+        codec_info_.pacsize = 220;
+        codec_info_.plfreq = 22000;
+      } else if (samplesPerSec == 44100) {
+        strcpy(codec_info_.plname, "L16");
+        _codecId = kCodecL16_16kHz;
+        codec_info_.pacsize = 440;
+        codec_info_.plfreq = 44000;
+      } else if (samplesPerSec == 48000) {
+        strcpy(codec_info_.plname, "L16");
+        _codecId = kCodecL16_16kHz;
+        codec_info_.pacsize = 480;
+        codec_info_.plfreq = 48000;
+      } else {
+        LOG(LS_ERROR) << "Unsupported PCM frequency!";
+        return -1;
+      }
+      break;
+    default:
+      LOG(LS_ERROR) << "unknown WAV format TAG!";
+      return -1;
+      break;
+  }
+  return 0;
 }
 
 int32_t ModuleFileUtility::InitWavReading(InStream& wav,
                                           const uint32_t start,
-                                          const uint32_t stop)
-{
+                                          const uint32_t stop) {
+  _reading = false;
 
-    _reading = false;
+  if (ReadWavHeader(wav) == -1) {
+    LOG(LS_ERROR) << "failed to read WAV header!";
+    return -1;
+  }
 
-    if(ReadWavHeader(wav) == -1)
-    {
-        LOG(LS_ERROR) << "failed to read WAV header!";
-        return -1;
-    }
+  _playoutPositionMs = 0;
+  _readPos = 0;
 
-    _playoutPositionMs = 0;
-    _readPos = 0;
-
-    if(start > 0)
-    {
-        uint8_t dummy[WAV_MAX_BUFFER_SIZE];
-        int readLength;
-        if(_readSizeBytes <= WAV_MAX_BUFFER_SIZE)
+  if (start > 0) {
+    uint8_t dummy[WAV_MAX_BUFFER_SIZE];
+    int readLength;
+    if (_readSizeBytes <= WAV_MAX_BUFFER_SIZE) {
+      while (_playoutPositionMs < start) {
+        readLength = wav.Read(dummy, _readSizeBytes);
+        if (readLength == static_cast<int>(_readSizeBytes)) {
+          _readPos += _readSizeBytes;
+          _playoutPositionMs += 10;
+        } else  // Must have reached EOF before start position!
         {
-            while (_playoutPositionMs < start)
-            {
-                readLength = wav.Read(dummy, _readSizeBytes);
-                if(readLength == static_cast<int>(_readSizeBytes))
-                {
-                    _readPos += _readSizeBytes;
-                    _playoutPositionMs += 10;
-                }
-                else // Must have reached EOF before start position!
-                {
-                    LOG(LS_ERROR)
-                        << "InitWavReading(), EOF before start position";
-                    return -1;
-                }
-            }
+          LOG(LS_ERROR) << "InitWavReading(), EOF before start position";
+          return -1;
         }
-        else
-        {
-            return -1;
-        }
-    }
-    if( InitWavCodec(_wavFormatObj.nSamplesPerSec, _wavFormatObj.nChannels,
-                     _wavFormatObj.nBitsPerSample,
-                     _wavFormatObj.formatTag) != 0)
-    {
-        return -1;
-    }
-    _bytesPerSample = static_cast<size_t>(_wavFormatObj.nBitsPerSample / 8);
-
-
-    _startPointInMs = start;
-    _stopPointInMs = stop;
-    _reading = true;
-    return 0;
-}
-
-int32_t ModuleFileUtility::ReadWavDataAsMono(
-    InStream& wav,
-    int8_t* outData,
-    const size_t bufferSize)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::ReadWavDataAsMono(wav= " << &wav
-                    << ", outData= " << static_cast<void*>(outData)
-                    << ", bufSize= " << bufferSize << ")";
-
-    // The number of bytes that should be read from file.
-    const size_t totalBytesNeeded = _readSizeBytes;
-    // The number of bytes that will be written to outData.
-    const size_t bytesRequested = (codec_info_.channels == 2) ?
-        totalBytesNeeded >> 1 : totalBytesNeeded;
-    if(bufferSize < bytesRequested)
-    {
-        LOG(LS_ERROR) << "ReadWavDataAsMono: output buffer is too short!";
-        return -1;
-    }
-    if(outData == NULL)
-    {
-        LOG(LS_ERROR) << "ReadWavDataAsMono: output buffer NULL!";
-        return -1;
-    }
-
-    if(!_reading)
-    {
-        LOG(LS_ERROR) << "ReadWavDataAsMono: no longer reading file.";
-        return -1;
-    }
-
-    int32_t bytesRead = ReadWavData(
-        wav,
-        (codec_info_.channels == 2) ? _tempData : (uint8_t*)outData,
-        totalBytesNeeded);
-    if(bytesRead == 0)
-    {
-        return 0;
-    }
-    if(bytesRead < 0)
-    {
-        LOG(LS_ERROR)
-            << "ReadWavDataAsMono: failed to read data from WAV file.";
-        return -1;
-    }
-    // Output data is should be mono.
-    if(codec_info_.channels == 2)
-    {
-        for (size_t i = 0; i < bytesRequested / _bytesPerSample; i++)
-        {
-            // Sample value is the average of left and right buffer rounded to
-            // closest integer value. Note samples can be either 1 or 2 byte.
-            if(_bytesPerSample == 1)
-            {
-                _tempData[i] = ((_tempData[2 * i] + _tempData[(2 * i) + 1] +
-                                 1) >> 1);
-            }
-            else
-            {
-                int16_t* sampleData = (int16_t*) _tempData;
-                sampleData[i] = ((sampleData[2 * i] + sampleData[(2 * i) + 1] +
-                                  1) >> 1);
-            }
-        }
-        memcpy(outData, _tempData, bytesRequested);
-    }
-    return static_cast<int32_t>(bytesRequested);
-}
-
-int32_t ModuleFileUtility::ReadWavDataAsStereo(
-    InStream& wav,
-    int8_t* outDataLeft,
-    int8_t* outDataRight,
-    const size_t bufferSize)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::ReadWavDataAsStereo(wav= " << &wav
-                    << ", outLeft= " << static_cast<void*>(outDataLeft)
-                    << ", outRight= " << static_cast<void*>(outDataRight)
-                    << ", bufSize= " << bufferSize << ")";
-
-    if((outDataLeft == NULL) ||
-       (outDataRight == NULL))
-    {
-        LOG(LS_ERROR) << "ReadWavDataAsStereo: an input buffer is NULL!";
-        return -1;
-    }
-    if(codec_info_.channels != 2)
-    {
-        LOG(LS_ERROR)
-            << "ReadWavDataAsStereo: WAV file does not contain stereo data!";
-        return -1;
-    }
-    if(! _reading)
-    {
-        LOG(LS_ERROR) << "ReadWavDataAsStereo: no longer reading file.";
-        return -1;
-    }
-
-    // The number of bytes that should be read from file.
-    const size_t totalBytesNeeded = _readSizeBytes;
-    // The number of bytes that will be written to the left and the right
-    // buffers.
-    const size_t bytesRequested = totalBytesNeeded >> 1;
-    if(bufferSize < bytesRequested)
-    {
-        LOG(LS_ERROR) << "ReadWavDataAsStereo: Output buffers are too short!";
-        assert(false);
-        return -1;
-    }
-
-    int32_t bytesRead = ReadWavData(wav, _tempData, totalBytesNeeded);
-    if(bytesRead <= 0)
-    {
-        LOG(LS_ERROR)
-            << "ReadWavDataAsStereo: failed to read data from WAV file.";
-        return -1;
-    }
-
-    // Turn interleaved audio to left and right buffer. Note samples can be
-    // either 1 or 2 bytes
-    if(_bytesPerSample == 1)
-    {
-        for (size_t i = 0; i < bytesRequested; i++)
-        {
-            outDataLeft[i]  = _tempData[2 * i];
-            outDataRight[i] = _tempData[(2 * i) + 1];
-        }
-    }
-    else if(_bytesPerSample == 2)
-    {
-        int16_t* sampleData = reinterpret_cast<int16_t*>(_tempData);
-        int16_t* outLeft = reinterpret_cast<int16_t*>(outDataLeft);
-        int16_t* outRight = reinterpret_cast<int16_t*>(
-            outDataRight);
-
-        // Bytes requested to samples requested.
-        size_t sampleCount = bytesRequested >> 1;
-        for (size_t i = 0; i < sampleCount; i++)
-        {
-            outLeft[i] = sampleData[2 * i];
-            outRight[i] = sampleData[(2 * i) + 1];
-        }
+      }
     } else {
-        LOG(LS_ERROR) << "ReadWavStereoData: unsupported sample size "
-                      << _bytesPerSample << "!";
-        assert(false);
-        return -1;
+      return -1;
     }
-    return static_cast<int32_t>(bytesRequested);
+  }
+  if (InitWavCodec(_wavFormatObj.nSamplesPerSec, _wavFormatObj.nChannels,
+                   _wavFormatObj.nBitsPerSample,
+                   _wavFormatObj.formatTag) != 0) {
+    return -1;
+  }
+  _bytesPerSample = static_cast<size_t>(_wavFormatObj.nBitsPerSample / 8);
+
+  _startPointInMs = start;
+  _stopPointInMs = stop;
+  _reading = true;
+  return 0;
+}
+
+int32_t ModuleFileUtility::ReadWavDataAsMono(InStream& wav,
+                                             int8_t* outData,
+                                             const size_t bufferSize) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::ReadWavDataAsMono(wav= " << &wav
+                  << ", outData= " << static_cast<void*>(outData)
+                  << ", bufSize= " << bufferSize << ")";
+
+  // The number of bytes that should be read from file.
+  const size_t totalBytesNeeded = _readSizeBytes;
+  // The number of bytes that will be written to outData.
+  const size_t bytesRequested =
+      (codec_info_.channels == 2) ? totalBytesNeeded >> 1 : totalBytesNeeded;
+  if (bufferSize < bytesRequested) {
+    LOG(LS_ERROR) << "ReadWavDataAsMono: output buffer is too short!";
+    return -1;
+  }
+  if (outData == NULL) {
+    LOG(LS_ERROR) << "ReadWavDataAsMono: output buffer NULL!";
+    return -1;
+  }
+
+  if (!_reading) {
+    LOG(LS_ERROR) << "ReadWavDataAsMono: no longer reading file.";
+    return -1;
+  }
+
+  int32_t bytesRead = ReadWavData(
+      wav, (codec_info_.channels == 2) ? _tempData : (uint8_t*)outData,
+      totalBytesNeeded);
+  if (bytesRead == 0) {
+    return 0;
+  }
+  if (bytesRead < 0) {
+    LOG(LS_ERROR) << "ReadWavDataAsMono: failed to read data from WAV file.";
+    return -1;
+  }
+  // Output data should be mono.
+  if (codec_info_.channels == 2) {
+    for (size_t i = 0; i < bytesRequested / _bytesPerSample; i++) {
+      // Sample value is the average of left and right buffer rounded to
+      // closest integer value. Note samples can be either 1 or 2 byte.
+      if (_bytesPerSample == 1) {
+        _tempData[i] = ((_tempData[2 * i] + _tempData[(2 * i) + 1] + 1) >> 1);
+      } else {
+        int16_t* sampleData = (int16_t*)_tempData;
+        sampleData[i] =
+            ((sampleData[2 * i] + sampleData[(2 * i) + 1] + 1) >> 1);
+      }
+    }
+    memcpy(outData, _tempData, bytesRequested);
+  }
+  return static_cast<int32_t>(bytesRequested);
+}
+
+int32_t ModuleFileUtility::ReadWavDataAsStereo(InStream& wav,
+                                               int8_t* outDataLeft,
+                                               int8_t* outDataRight,
+                                               const size_t bufferSize) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::ReadWavDataAsStereo(wav= " << &wav
+                  << ", outLeft= " << static_cast<void*>(outDataLeft)
+                  << ", outRight= " << static_cast<void*>(outDataRight)
+                  << ", bufSize= " << bufferSize << ")";
+
+  if ((outDataLeft == NULL) || (outDataRight == NULL)) {
+    LOG(LS_ERROR) << "ReadWavDataAsStereo: an input buffer is NULL!";
+    return -1;
+  }
+  if (codec_info_.channels != 2) {
+    LOG(LS_ERROR)
+        << "ReadWavDataAsStereo: WAV file does not contain stereo data!";
+    return -1;
+  }
+  if (!_reading) {
+    LOG(LS_ERROR) << "ReadWavDataAsStereo: no longer reading file.";
+    return -1;
+  }
+
+  // The number of bytes that should be read from file.
+  const size_t totalBytesNeeded = _readSizeBytes;
+  // The number of bytes that will be written to the left and the right
+  // buffers.
+  const size_t bytesRequested = totalBytesNeeded >> 1;
+  if (bufferSize < bytesRequested) {
+    LOG(LS_ERROR) << "ReadWavDataAsStereo: Output buffers are too short!";
+    assert(false);
+    return -1;
+  }
+
+  int32_t bytesRead = ReadWavData(wav, _tempData, totalBytesNeeded);
+  if (bytesRead <= 0) {
+    LOG(LS_ERROR) << "ReadWavDataAsStereo: failed to read data from WAV file.";
+    return -1;
+  }
+
+  // Turn interleaved audio to left and right buffer. Note samples can be
+  // either 1 or 2 bytes
+  if (_bytesPerSample == 1) {
+    for (size_t i = 0; i < bytesRequested; i++) {
+      outDataLeft[i] = _tempData[2 * i];
+      outDataRight[i] = _tempData[(2 * i) + 1];
+    }
+  } else if (_bytesPerSample == 2) {
+    int16_t* sampleData = reinterpret_cast<int16_t*>(_tempData);
+    int16_t* outLeft = reinterpret_cast<int16_t*>(outDataLeft);
+    int16_t* outRight = reinterpret_cast<int16_t*>(outDataRight);
+
+    // Bytes requested to samples requested.
+    size_t sampleCount = bytesRequested >> 1;
+    for (size_t i = 0; i < sampleCount; i++) {
+      outLeft[i] = sampleData[2 * i];
+      outRight[i] = sampleData[(2 * i) + 1];
+    }
+  } else {
+    LOG(LS_ERROR) << "ReadWavStereoData: unsupported sample size "
+                  << _bytesPerSample << "!";
+    assert(false);
+    return -1;
+  }
+  return static_cast<int32_t>(bytesRequested);
 }
 
 int32_t ModuleFileUtility::ReadWavData(InStream& wav,
                                        uint8_t* buffer,
-                                       size_t dataLengthInBytes)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::ReadWavData(wav= " << &wav
-                    << ", buffer= " << static_cast<void*>(buffer)
-                    << ", dataLen= " << dataLengthInBytes << ")";
+                                       size_t dataLengthInBytes) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::ReadWavData(wav= " << &wav
+                  << ", buffer= " << static_cast<void*>(buffer)
+                  << ", dataLen= " << dataLengthInBytes << ")";
 
-    if(buffer == NULL)
-    {
-        LOG(LS_ERROR) << "ReadWavDataAsMono: output buffer NULL!";
-        return -1;
+  if (buffer == NULL) {
+    LOG(LS_ERROR) << "ReadWavDataAsMono: output buffer NULL!";
+    return -1;
+  }
+
+  // Make sure that a read won't return too few samples.
+  // TODO (hellner): why not read the remaining bytes needed from the start
+  //                 of the file?
+  if (_dataSize < (_readPos + dataLengthInBytes)) {
+    // Rewind() returning -1 may mean the file is not supposed to be looped.
+    if (wav.Rewind() == -1) {
+      _reading = false;
+      return 0;
     }
-
-    // Make sure that a read won't return too few samples.
-    // TODO (hellner): why not read the remaining bytes needed from the start
-    //                 of the file?
-    if(_dataSize < (_readPos + dataLengthInBytes))
-    {
-        // Rewind() being -1 may be due to the file not supposed to be looped.
-        if(wav.Rewind() == -1)
-        {
-            _reading = false;
-            return 0;
-        }
-        if(InitWavReading(wav, _startPointInMs, _stopPointInMs) == -1)
-        {
-            _reading = false;
-            return -1;
-        }
+    if (InitWavReading(wav, _startPointInMs, _stopPointInMs) == -1) {
+      _reading = false;
+      return -1;
     }
+  }
 
-    int32_t bytesRead = wav.Read(buffer, dataLengthInBytes);
-    if(bytesRead < 0)
-    {
+  int32_t bytesRead = wav.Read(buffer, dataLengthInBytes);
+  if (bytesRead < 0) {
+    _reading = false;
+    return -1;
+  }
+
+  // This should never happen due to earlier sanity checks.
+  // TODO (hellner): change to an assert and fail here since this should
+  //                 never happen...
+  if (bytesRead < (int32_t)dataLengthInBytes) {
+    if ((wav.Rewind() == -1) ||
+        (InitWavReading(wav, _startPointInMs, _stopPointInMs) == -1)) {
+      _reading = false;
+      return -1;
+    } else {
+      bytesRead = wav.Read(buffer, dataLengthInBytes);
+      if (bytesRead < (int32_t)dataLengthInBytes) {
         _reading = false;
         return -1;
+      }
     }
+  }
 
-    // This should never happen due to earlier sanity checks.
-    // TODO (hellner): change to an assert and fail here since this should
-    //                 never happen...
-    if(bytesRead < (int32_t)dataLengthInBytes)
-    {
-        if((wav.Rewind() == -1) ||
-            (InitWavReading(wav, _startPointInMs, _stopPointInMs) == -1))
-        {
-            _reading = false;
-            return -1;
-        }
-        else
-        {
-            bytesRead = wav.Read(buffer, dataLengthInBytes);
-            if(bytesRead < (int32_t)dataLengthInBytes)
-            {
-                _reading = false;
-                return -1;
-            }
-        }
+  _readPos += bytesRead;
+
+  // TODO (hellner): Why is dataLengthInBytes let dictate the number of bytes
+  //                 to read when exactly 10ms should be read?!
+  _playoutPositionMs += 10;
+  if ((_stopPointInMs > 0) && (_playoutPositionMs >= _stopPointInMs)) {
+    if ((wav.Rewind() == -1) ||
+        (InitWavReading(wav, _startPointInMs, _stopPointInMs) == -1)) {
+      _reading = false;
     }
-
-    _readPos += bytesRead;
-
-    // TODO (hellner): Why is dataLengthInBytes let dictate the number of bytes
-    //                 to read when exactly 10ms should be read?!
-    _playoutPositionMs += 10;
-    if((_stopPointInMs > 0) &&
-        (_playoutPositionMs >= _stopPointInMs))
-    {
-        if((wav.Rewind() == -1) ||
-            (InitWavReading(wav, _startPointInMs, _stopPointInMs) == -1))
-        {
-            _reading = false;
-        }
-    }
-    return bytesRead;
+  }
+  return bytesRead;
 }
 
 int32_t ModuleFileUtility::InitWavWriting(OutStream& wav,
-                                          const CodecInst& codecInst)
-{
+                                          const CodecInst& codecInst) {
+  if (set_codec_info(codecInst) != 0) {
+    LOG(LS_ERROR) << "codecInst identifies unsupported codec!";
+    return -1;
+  }
+  _writing = false;
+  size_t channels = (codecInst.channels == 0) ? 1 : codecInst.channels;
 
-    if(set_codec_info(codecInst) != 0)
-    {
-        LOG(LS_ERROR) << "codecInst identifies unsupported codec!";
-        return -1;
+  if (STR_CASE_CMP(codecInst.plname, "PCMU") == 0) {
+    _bytesPerSample = 1;
+    if (WriteWavHeader(wav, 8000, _bytesPerSample, channels, kWavFormatMuLaw,
+                       0) == -1) {
+      return -1;
     }
-    _writing = false;
-    size_t channels = (codecInst.channels == 0) ? 1 : codecInst.channels;
-
-    if(STR_CASE_CMP(codecInst.plname, "PCMU") == 0)
-    {
-        _bytesPerSample = 1;
-        if(WriteWavHeader(wav, 8000, _bytesPerSample, channels,
-                          kWavFormatMuLaw, 0) == -1)
-        {
-            return -1;
-        }
+  } else if (STR_CASE_CMP(codecInst.plname, "PCMA") == 0) {
+    _bytesPerSample = 1;
+    if (WriteWavHeader(wav, 8000, _bytesPerSample, channels, kWavFormatALaw,
+                       0) == -1) {
+      return -1;
     }
-    else if(STR_CASE_CMP(codecInst.plname, "PCMA") == 0)
-    {
-        _bytesPerSample = 1;
-        if(WriteWavHeader(wav, 8000, _bytesPerSample, channels, kWavFormatALaw,
-                          0) == -1)
-        {
-            return -1;
-        }
+  } else if (STR_CASE_CMP(codecInst.plname, "L16") == 0) {
+    _bytesPerSample = 2;
+    if (WriteWavHeader(wav, codecInst.plfreq, _bytesPerSample, channels,
+                       kWavFormatPcm, 0) == -1) {
+      return -1;
     }
-    else if(STR_CASE_CMP(codecInst.plname, "L16") == 0)
-    {
-        _bytesPerSample = 2;
-        if(WriteWavHeader(wav, codecInst.plfreq, _bytesPerSample, channels,
-                          kWavFormatPcm, 0) == -1)
-        {
-            return -1;
-        }
-    }
-    else
-    {
-        LOG(LS_ERROR) << "codecInst identifies unsupported codec for WAV file!";
-        return -1;
-    }
-    _writing = true;
-    _bytesWritten = 0;
-    return 0;
+  } else {
+    LOG(LS_ERROR) << "codecInst identifies unsupported codec for WAV file!";
+    return -1;
+  }
+  _writing = true;
+  _bytesWritten = 0;
+  return 0;
 }
 
 int32_t ModuleFileUtility::WriteWavData(OutStream& out,
-                                        const int8_t*  buffer,
-                                        const size_t dataLength)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::WriteWavData(out= " << &out
-                    << ", buf= " << static_cast<const void*>(buffer)
-                    << ", dataLen= " << dataLength << ")";
+                                        const int8_t* buffer,
+                                        const size_t dataLength) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::WriteWavData(out= " << &out
+                  << ", buf= " << static_cast<const void*>(buffer)
+                  << ", dataLen= " << dataLength << ")";
 
-    if(buffer == NULL)
-    {
-        LOG(LS_ERROR) << "WriteWavData: input buffer NULL!";
-        return -1;
-    }
+  if (buffer == NULL) {
+    LOG(LS_ERROR) << "WriteWavData: input buffer NULL!";
+    return -1;
+  }
 
-    if(!out.Write(buffer, dataLength))
-    {
-        return -1;
-    }
-    _bytesWritten += dataLength;
-    return static_cast<int32_t>(dataLength);
+  if (!out.Write(buffer, dataLength)) {
+    return -1;
+  }
+  _bytesWritten += dataLength;
+  return static_cast<int32_t>(dataLength);
 }
 
+int32_t ModuleFileUtility::WriteWavHeader(OutStream& wav,
+                                          uint32_t freq,
+                                          size_t bytesPerSample,
+                                          size_t channels,
+                                          uint32_t format,
+                                          size_t lengthInBytes) {
+  // Frame size in bytes for 10 ms of audio.
+  // TODO (hellner): 44.1 kHz has 440 samples frame size. Doesn't seem to
+  //                 be taken into consideration here!
+  const size_t frameSize = (freq / 100) * channels;
 
-int32_t ModuleFileUtility::WriteWavHeader(
-    OutStream& wav,
-    uint32_t freq,
-    size_t bytesPerSample,
-    size_t channels,
-    uint32_t format,
-    size_t lengthInBytes)
-{
-    // Frame size in bytes for 10 ms of audio.
-    // TODO (hellner): 44.1 kHz has 440 samples frame size. Doesn't seem to
-    //                 be taken into consideration here!
-    const size_t frameSize = (freq / 100) * channels;
+  // Calculate the number of full frames that the wave file contain.
+  const size_t dataLengthInBytes = frameSize * (lengthInBytes / frameSize);
 
-    // Calculate the number of full frames that the wave file contain.
-    const size_t dataLengthInBytes = frameSize * (lengthInBytes / frameSize);
+  uint8_t buf[kWavHeaderSize];
+  webrtc::WriteWavHeader(buf, channels, freq, static_cast<WavFormat>(format),
+                         bytesPerSample, dataLengthInBytes / bytesPerSample);
+  wav.Write(buf, kWavHeaderSize);
+  return 0;
+}
 
-    uint8_t buf[kWavHeaderSize];
-    webrtc::WriteWavHeader(buf, channels, freq, static_cast<WavFormat>(format),
-                           bytesPerSample, dataLengthInBytes / bytesPerSample);
-    wav.Write(buf, kWavHeaderSize);
+int32_t ModuleFileUtility::UpdateWavHeader(OutStream& wav) {
+  int32_t res = -1;
+  if (wav.Rewind() == -1) {
+    return -1;
+  }
+  size_t channels = (codec_info_.channels == 0) ? 1 : codec_info_.channels;
+
+  if (STR_CASE_CMP(codec_info_.plname, "L16") == 0) {
+    res = WriteWavHeader(wav, codec_info_.plfreq, 2, channels, kWavFormatPcm,
+                         _bytesWritten);
+  } else if (STR_CASE_CMP(codec_info_.plname, "PCMU") == 0) {
+    res =
+        WriteWavHeader(wav, 8000, 1, channels, kWavFormatMuLaw, _bytesWritten);
+  } else if (STR_CASE_CMP(codec_info_.plname, "PCMA") == 0) {
+    res = WriteWavHeader(wav, 8000, 1, channels, kWavFormatALaw, _bytesWritten);
+  } else {
+    // Allow calling this API even if not writing to a WAVE file.
+    // TODO (hellner): why?!
     return 0;
+  }
+  return res;
 }
 
-int32_t ModuleFileUtility::UpdateWavHeader(OutStream& wav)
-{
-    int32_t res = -1;
-    if(wav.Rewind() == -1)
-    {
-        return -1;
-    }
-    size_t channels = (codec_info_.channels == 0) ? 1 : codec_info_.channels;
-
-    if(STR_CASE_CMP(codec_info_.plname, "L16") == 0)
-    {
-        res = WriteWavHeader(wav, codec_info_.plfreq, 2, channels,
-                             kWavFormatPcm, _bytesWritten);
-    } else if(STR_CASE_CMP(codec_info_.plname, "PCMU") == 0) {
-            res = WriteWavHeader(wav, 8000, 1, channels, kWavFormatMuLaw,
-                                 _bytesWritten);
-    } else if(STR_CASE_CMP(codec_info_.plname, "PCMA") == 0) {
-            res = WriteWavHeader(wav, 8000, 1, channels, kWavFormatALaw,
-                                 _bytesWritten);
-    } else {
-        // Allow calling this API even if not writing to a WAVE file.
-        // TODO (hellner): why?!
-        return 0;
-    }
-    return res;
-}
-
-
 int32_t ModuleFileUtility::InitPreEncodedReading(InStream& in,
-                                                 const CodecInst& cinst)
-{
+                                                 const CodecInst& cinst) {
+  uint8_t preEncodedID;
+  in.Read(&preEncodedID, 1);
 
-    uint8_t preEncodedID;
-    in.Read(&preEncodedID, 1);
+  MediaFileUtility_CodecType codecType =
+      (MediaFileUtility_CodecType)preEncodedID;
 
-    MediaFileUtility_CodecType codecType =
-        (MediaFileUtility_CodecType)preEncodedID;
-
-    if(set_codec_info(cinst) != 0)
-    {
-        LOG(LS_ERROR) << "Pre-encoded file send codec mismatch!";
-        return -1;
-    }
-    if(codecType != _codecId)
-    {
-        LOG(LS_ERROR) << "Pre-encoded file format codec mismatch!";
-        return -1;
-    }
-    memcpy(&codec_info_,&cinst,sizeof(CodecInst));
-    _reading = true;
-    return 0;
+  if (set_codec_info(cinst) != 0) {
+    LOG(LS_ERROR) << "Pre-encoded file send codec mismatch!";
+    return -1;
+  }
+  if (codecType != _codecId) {
+    LOG(LS_ERROR) << "Pre-encoded file format codec mismatch!";
+    return -1;
+  }
+  memcpy(&codec_info_, &cinst, sizeof(CodecInst));
+  _reading = true;
+  return 0;
 }
 
-int32_t ModuleFileUtility::ReadPreEncodedData(
-    InStream& in,
-    int8_t* outData,
-    const size_t bufferSize)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::ReadPreEncodedData(in= " << &in
-                    << ", outData= " << static_cast<void*>(outData)
-                    << ", bufferSize= " << bufferSize << ")";
+int32_t ModuleFileUtility::ReadPreEncodedData(InStream& in,
+                                              int8_t* outData,
+                                              const size_t bufferSize) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::ReadPreEncodedData(in= " << &in
+                  << ", outData= " << static_cast<void*>(outData)
+                  << ", bufferSize= " << bufferSize << ")";
 
-    if(outData == NULL)
-    {
-        LOG(LS_ERROR) << "output buffer NULL";
-    }
+  if (outData == NULL) {
+    LOG(LS_ERROR) << "output buffer NULL";
+  }
 
-    size_t frameLen;
-    uint8_t buf[64];
-    // Each frame has a two byte header containing the frame length.
-    int32_t res = in.Read(buf, 2);
-    if(res != 2)
-    {
-        if(!in.Rewind())
-        {
-            // The first byte is the codec identifier.
-            in.Read(buf, 1);
-            res = in.Read(buf, 2);
-        }
-        else
-        {
-            return -1;
-        }
+  size_t frameLen;
+  uint8_t buf[64];
+  // Each frame has a two byte header containing the frame length.
+  int32_t res = in.Read(buf, 2);
+  if (res != 2) {
+    if (!in.Rewind()) {
+      // The first byte is the codec identifier.
+      in.Read(buf, 1);
+      res = in.Read(buf, 2);
+    } else {
+      return -1;
     }
-    frameLen = buf[0] + buf[1] * 256;
-    if(bufferSize < frameLen)
-    {
-        LOG(LS_ERROR) << "buffer not large enough to read " << frameLen
-                      << " bytes of pre-encoded data!";
-        return -1;
-    }
-    return in.Read(outData, frameLen);
+  }
+  frameLen = buf[0] + buf[1] * 256;
+  if (bufferSize < frameLen) {
+    LOG(LS_ERROR) << "buffer not large enough to read " << frameLen
+                  << " bytes of pre-encoded data!";
+    return -1;
+  }
+  return in.Read(outData, frameLen);
 }
 
-int32_t ModuleFileUtility::InitPreEncodedWriting(
-    OutStream& out,
-    const CodecInst& codecInst)
-{
-
-    if(set_codec_info(codecInst) != 0)
-    {
-        LOG(LS_ERROR) << "CodecInst not recognized!";
-        return -1;
-    }
-    _writing = true;
-    _bytesWritten = 1;
-    out.Write(&_codecId, 1);
-    return 0;
+int32_t ModuleFileUtility::InitPreEncodedWriting(OutStream& out,
+                                                 const CodecInst& codecInst) {
+  if (set_codec_info(codecInst) != 0) {
+    LOG(LS_ERROR) << "CodecInst not recognized!";
+    return -1;
+  }
+  _writing = true;
+  _bytesWritten = 1;
+  out.Write(&_codecId, 1);
+  return 0;
 }
 
-int32_t ModuleFileUtility::WritePreEncodedData(
-    OutStream& out,
-    const int8_t* buffer,
-    const size_t dataLength)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::WritePreEncodedData(out= " << &out
-                    << " , inData= " << static_cast<const void*>(buffer)
-                    << ", dataLen= " << dataLength << ")";
+int32_t ModuleFileUtility::WritePreEncodedData(OutStream& out,
+                                               const int8_t* buffer,
+                                               const size_t dataLength) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::WritePreEncodedData(out= " << &out
+                  << " , inData= " << static_cast<const void*>(buffer)
+                  << ", dataLen= " << dataLength << ")";
 
-    if(buffer == NULL)
-    {
-        LOG(LS_ERROR) << "buffer NULL";
-    }
+  if (buffer == NULL) {
+    LOG(LS_ERROR) << "buffer NULL";
+  }
 
-    size_t bytesWritten = 0;
-    // The first two bytes is the size of the frame.
-    int16_t lengthBuf;
-    lengthBuf = (int16_t)dataLength;
-    if(dataLength > static_cast<size_t>(std::numeric_limits<int16_t>::max()) ||
-       !out.Write(&lengthBuf, 2))
-    {
-       return -1;
-    }
-    bytesWritten = 2;
+  size_t bytesWritten = 0;
+  // The first two bytes is the size of the frame.
+  int16_t lengthBuf;
+  lengthBuf = (int16_t)dataLength;
+  if (dataLength > static_cast<size_t>(std::numeric_limits<int16_t>::max()) ||
+      !out.Write(&lengthBuf, 2)) {
+    return -1;
+  }
+  bytesWritten = 2;
 
-    if(!out.Write(buffer, dataLength))
-    {
-        return -1;
-    }
-    bytesWritten += dataLength;
-    return static_cast<int32_t>(bytesWritten);
+  if (!out.Write(buffer, dataLength)) {
+    return -1;
+  }
+  bytesWritten += dataLength;
+  return static_cast<int32_t>(bytesWritten);
 }
 
-int32_t ModuleFileUtility::InitCompressedReading(
-    InStream& in,
-    const uint32_t start,
-    const uint32_t stop)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::InitCompressedReading(in= " << &in
-                    << ", start= " << start << ", stop= " << stop << ")";
+int32_t ModuleFileUtility::InitCompressedReading(InStream& in,
+                                                 const uint32_t start,
+                                                 const uint32_t stop) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::InitCompressedReading(in= " << &in
+                  << ", start= " << start << ", stop= " << stop << ")";
 
 #if defined(WEBRTC_CODEC_ILBC)
-    int16_t read_len = 0;
+  int16_t read_len = 0;
 #endif
-    _codecId = kCodecNoCodec;
-    _playoutPositionMs = 0;
-    _reading = false;
+  _codecId = kCodecNoCodec;
+  _playoutPositionMs = 0;
+  _reading = false;
 
-    _startPointInMs = start;
-    _stopPointInMs = stop;
+  _startPointInMs = start;
+  _stopPointInMs = stop;
 
-    // Read the codec name
-    int32_t cnt = 0;
-    char buf[64];
-    do
-    {
-        in.Read(&buf[cnt++], 1);
-    } while ((buf[cnt-1] != '\n') && (64 > cnt));
+  // Read the codec name
+  int32_t cnt = 0;
+  char buf[64];
+  do {
+    in.Read(&buf[cnt++], 1);
+  } while ((buf[cnt - 1] != '\n') && (64 > cnt));
 
-    if(cnt==64)
-    {
-        return -1;
-    }
-    buf[cnt]=0;
+  if (cnt == 64) {
+    return -1;
+  }
+  buf[cnt] = 0;
 
 #ifdef WEBRTC_CODEC_ILBC
-    if(!strcmp("#!iLBC20\n", buf))
-    {
-        codec_info_.pltype = 102;
-        strcpy(codec_info_.plname, "ilbc");
-        codec_info_.plfreq   = 8000;
-        codec_info_.pacsize  = 160;
-        codec_info_.channels = 1;
-        codec_info_.rate     = 13300;
-        _codecId = kCodecIlbc20Ms;
+  if (!strcmp("#!iLBC20\n", buf)) {
+    codec_info_.pltype = 102;
+    strcpy(codec_info_.plname, "ilbc");
+    codec_info_.plfreq = 8000;
+    codec_info_.pacsize = 160;
+    codec_info_.channels = 1;
+    codec_info_.rate = 13300;
+    _codecId = kCodecIlbc20Ms;
 
-        if(_startPointInMs > 0)
-        {
-            while (_playoutPositionMs <= _startPointInMs)
-            {
-                read_len = in.Read(buf, 38);
-                if(read_len != 38)
-                {
-                    return -1;
-                }
-                _playoutPositionMs += 20;
-            }
+    if (_startPointInMs > 0) {
+      while (_playoutPositionMs <= _startPointInMs) {
+        read_len = in.Read(buf, 38);
+        if (read_len != 38) {
+          return -1;
         }
+        _playoutPositionMs += 20;
+      }
     }
+  }
 
-    if(!strcmp("#!iLBC30\n", buf))
-    {
-        codec_info_.pltype = 102;
-        strcpy(codec_info_.plname, "ilbc");
-        codec_info_.plfreq   = 8000;
-        codec_info_.pacsize  = 240;
-        codec_info_.channels = 1;
-        codec_info_.rate     = 13300;
-        _codecId = kCodecIlbc30Ms;
+  if (!strcmp("#!iLBC30\n", buf)) {
+    codec_info_.pltype = 102;
+    strcpy(codec_info_.plname, "ilbc");
+    codec_info_.plfreq = 8000;
+    codec_info_.pacsize = 240;
+    codec_info_.channels = 1;
+    codec_info_.rate = 13300;
+    _codecId = kCodecIlbc30Ms;
 
-        if(_startPointInMs > 0)
-        {
-            while (_playoutPositionMs <= _startPointInMs)
-            {
-                read_len = in.Read(buf, 50);
-                if(read_len != 50)
-                {
-                    return -1;
-                }
-                _playoutPositionMs += 20;
-            }
+    if (_startPointInMs > 0) {
+      while (_playoutPositionMs <= _startPointInMs) {
+        read_len = in.Read(buf, 50);
+        if (read_len != 50) {
+          return -1;
         }
+        _playoutPositionMs += 20;
+      }
     }
+  }
 #endif
-    if(_codecId == kCodecNoCodec)
-    {
-        return -1;
-    }
-    _reading = true;
-    return 0;
+  if (_codecId == kCodecNoCodec) {
+    return -1;
+  }
+  _reading = true;
+  return 0;
 }
 
 int32_t ModuleFileUtility::ReadCompressedData(InStream& in,
                                               int8_t* outData,
-                                              size_t bufferSize)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::ReadCompressedData(in=" << &in
-                    << ", outData=" << static_cast<void*>(outData) << ", bytes="
-                    << bufferSize << ")";
+                                              size_t bufferSize) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::ReadCompressedData(in=" << &in
+                  << ", outData=" << static_cast<void*>(outData)
+                  << ", bytes=" << bufferSize << ")";
 
-    int bytesRead = 0;
+  int bytesRead = 0;
 
-    if(! _reading)
-    {
-        LOG(LS_ERROR) << "not currently reading!";
-        return -1;
-    }
-
-#ifdef WEBRTC_CODEC_ILBC
-    if((_codecId == kCodecIlbc20Ms) ||
-        (_codecId == kCodecIlbc30Ms))
-    {
-        size_t byteSize = 0;
-        if(_codecId == kCodecIlbc30Ms)
-        {
-            byteSize = 50;
-        }
-        if(_codecId == kCodecIlbc20Ms)
-        {
-            byteSize = 38;
-        }
-        if(bufferSize < byteSize)
-        {
-            LOG(LS_ERROR)
-                << "output buffer is too short to read ILBC compressed data.";
-            assert(false);
-            return -1;
-        }
-
-        bytesRead = in.Read(outData, byteSize);
-        if(bytesRead != static_cast<int>(byteSize))
-        {
-            if(!in.Rewind())
-            {
-                InitCompressedReading(in, _startPointInMs, _stopPointInMs);
-                bytesRead = in.Read(outData, byteSize);
-                if(bytesRead != static_cast<int>(byteSize))
-                {
-                    _reading = false;
-                    return -1;
-                }
-            }
-            else
-            {
-                _reading = false;
-                return -1;
-            }
-        }
-    }
-#endif
-    if(bytesRead == 0)
-    {
-        LOG(LS_ERROR)
-            << "ReadCompressedData() no bytes read, codec not supported";
-        return -1;
-    }
-
-    _playoutPositionMs += 20;
-    if((_stopPointInMs > 0) &&
-        (_playoutPositionMs >= _stopPointInMs))
-    {
-        if(!in.Rewind())
-        {
-            InitCompressedReading(in, _startPointInMs, _stopPointInMs);
-        }
-        else
-        {
-            _reading = false;
-        }
-    }
-    return bytesRead;
-}
-
-int32_t ModuleFileUtility::InitCompressedWriting(
-    OutStream& out,
-    const CodecInst& codecInst)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::InitCompressedWriting(out= " << &out
-                    << ", codecName= " << codecInst.plname << ")";
-
-    _writing = false;
-
-#ifdef WEBRTC_CODEC_ILBC
-    if(STR_CASE_CMP(codecInst.plname, "ilbc") == 0)
-    {
-        if(codecInst.pacsize == 160)
-        {
-            _codecId = kCodecIlbc20Ms;
-            out.Write("#!iLBC20\n",9);
-        }
-        else if(codecInst.pacsize == 240)
-        {
-            _codecId = kCodecIlbc30Ms;
-            out.Write("#!iLBC30\n",9);
-        }
-        else
-        {
-          LOG(LS_ERROR) << "codecInst defines unsupported compression codec!";
-            return -1;
-        }
-        memcpy(&codec_info_,&codecInst,sizeof(CodecInst));
-        _writing = true;
-        return 0;
-    }
-#endif
-
-    LOG(LS_ERROR) << "codecInst defines unsupported compression codec!";
+  if (!_reading) {
+    LOG(LS_ERROR) << "not currently reading!";
     return -1;
+  }
+
+#ifdef WEBRTC_CODEC_ILBC
+  if ((_codecId == kCodecIlbc20Ms) || (_codecId == kCodecIlbc30Ms)) {
+    size_t byteSize = 0;
+    if (_codecId == kCodecIlbc30Ms) {
+      byteSize = 50;
+    }
+    if (_codecId == kCodecIlbc20Ms) {
+      byteSize = 38;
+    }
+    if (bufferSize < byteSize) {
+      LOG(LS_ERROR)
+          << "output buffer is too short to read ILBC compressed data.";
+      assert(false);
+      return -1;
+    }
+
+    bytesRead = in.Read(outData, byteSize);
+    if (bytesRead != static_cast<int>(byteSize)) {
+      if (!in.Rewind()) {
+        InitCompressedReading(in, _startPointInMs, _stopPointInMs);
+        bytesRead = in.Read(outData, byteSize);
+        if (bytesRead != static_cast<int>(byteSize)) {
+          _reading = false;
+          return -1;
+        }
+      } else {
+        _reading = false;
+        return -1;
+      }
+    }
+  }
+#endif
+  if (bytesRead == 0) {
+    LOG(LS_ERROR) << "ReadCompressedData() no bytes read, codec not supported";
+    return -1;
+  }
+
+  _playoutPositionMs += 20;
+  if ((_stopPointInMs > 0) && (_playoutPositionMs >= _stopPointInMs)) {
+    if (!in.Rewind()) {
+      InitCompressedReading(in, _startPointInMs, _stopPointInMs);
+    } else {
+      _reading = false;
+    }
+  }
+  return bytesRead;
 }
 
-int32_t ModuleFileUtility::WriteCompressedData(
-    OutStream& out,
-    const int8_t* buffer,
-    const size_t dataLength)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::WriteCompressedData(out= " << &out
-                    << ", buf= " << static_cast<const void*>(buffer)
-                    << ", dataLen= " << dataLength << ")";
+int32_t ModuleFileUtility::InitCompressedWriting(OutStream& out,
+                                                 const CodecInst& codecInst) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::InitCompressedWriting(out= " << &out
+                  << ", codecName= " << codecInst.plname << ")";
 
-    if(buffer == NULL)
-    {
-        LOG(LS_ERROR) << "buffer NULL";
-    }
+  _writing = false;
 
-    if(!out.Write(buffer, dataLength))
-    {
-        return -1;
+#ifdef WEBRTC_CODEC_ILBC
+  if (STR_CASE_CMP(codecInst.plname, "ilbc") == 0) {
+    if (codecInst.pacsize == 160) {
+      _codecId = kCodecIlbc20Ms;
+      out.Write("#!iLBC20\n", 9);
+    } else if (codecInst.pacsize == 240) {
+      _codecId = kCodecIlbc30Ms;
+      out.Write("#!iLBC30\n", 9);
+    } else {
+      LOG(LS_ERROR) << "codecInst defines unsupported compression codec!";
+      return -1;
     }
-    return static_cast<int32_t>(dataLength);
+    memcpy(&codec_info_, &codecInst, sizeof(CodecInst));
+    _writing = true;
+    return 0;
+  }
+#endif
+
+  LOG(LS_ERROR) << "codecInst defines unsupported compression codec!";
+  return -1;
+}
+
+int32_t ModuleFileUtility::WriteCompressedData(OutStream& out,
+                                               const int8_t* buffer,
+                                               const size_t dataLength) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::WriteCompressedData(out= " << &out
+                  << ", buf= " << static_cast<const void*>(buffer)
+                  << ", dataLen= " << dataLength << ")";
+
+  if (buffer == NULL) {
+    LOG(LS_ERROR) << "buffer NULL";
+  }
+
+  if (!out.Write(buffer, dataLength)) {
+    return -1;
+  }
+  return static_cast<int32_t>(dataLength);
 }
 
 int32_t ModuleFileUtility::InitPCMReading(InStream& pcm,
                                           const uint32_t start,
                                           const uint32_t stop,
-                                          uint32_t freq)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::InitPCMReading(pcm= " << &pcm
-                    << ", start=" << start << ", stop=" << stop << ", freq="
-                    << freq << ")";
+                                          uint32_t freq) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::InitPCMReading(pcm= " << &pcm
+                  << ", start=" << start << ", stop=" << stop
+                  << ", freq=" << freq << ")";
 
-    int8_t dummy[320];
-    int read_len;
+  int8_t dummy[320];
+  int read_len;
 
-    _playoutPositionMs = 0;
-    _startPointInMs = start;
-    _stopPointInMs = stop;
-    _reading = false;
+  _playoutPositionMs = 0;
+  _startPointInMs = start;
+  _stopPointInMs = stop;
+  _reading = false;
 
-    if(freq == 8000)
-    {
-        strcpy(codec_info_.plname, "L16");
-        codec_info_.pltype   = -1;
-        codec_info_.plfreq   = 8000;
-        codec_info_.pacsize  = 160;
-        codec_info_.channels = 1;
-        codec_info_.rate     = 128000;
-        _codecId = kCodecL16_8Khz;
-    }
-    else if(freq == 16000)
-    {
-        strcpy(codec_info_.plname, "L16");
-        codec_info_.pltype   = -1;
-        codec_info_.plfreq   = 16000;
-        codec_info_.pacsize  = 320;
-        codec_info_.channels = 1;
-        codec_info_.rate     = 256000;
-        _codecId = kCodecL16_16kHz;
-    }
-    else if(freq == 32000)
-    {
-        strcpy(codec_info_.plname, "L16");
-        codec_info_.pltype   = -1;
-        codec_info_.plfreq   = 32000;
-        codec_info_.pacsize  = 320;
-        codec_info_.channels = 1;
-        codec_info_.rate     = 512000;
-        _codecId = kCodecL16_32Khz;
-    }
-    else if(freq == 48000)
-    {
-        strcpy(codec_info_.plname, "L16");
-        codec_info_.pltype   = -1;
-        codec_info_.plfreq   = 48000;
-        codec_info_.pacsize  = 480;
-        codec_info_.channels = 1;
-        codec_info_.rate     = 768000;
-        _codecId = kCodecL16_48Khz;
-    }
+  if (freq == 8000) {
+    strcpy(codec_info_.plname, "L16");
+    codec_info_.pltype = -1;
+    codec_info_.plfreq = 8000;
+    codec_info_.pacsize = 160;
+    codec_info_.channels = 1;
+    codec_info_.rate = 128000;
+    _codecId = kCodecL16_8Khz;
+  } else if (freq == 16000) {
+    strcpy(codec_info_.plname, "L16");
+    codec_info_.pltype = -1;
+    codec_info_.plfreq = 16000;
+    codec_info_.pacsize = 320;
+    codec_info_.channels = 1;
+    codec_info_.rate = 256000;
+    _codecId = kCodecL16_16kHz;
+  } else if (freq == 32000) {
+    strcpy(codec_info_.plname, "L16");
+    codec_info_.pltype = -1;
+    codec_info_.plfreq = 32000;
+    codec_info_.pacsize = 320;
+    codec_info_.channels = 1;
+    codec_info_.rate = 512000;
+    _codecId = kCodecL16_32Khz;
+  } else if (freq == 48000) {
+    strcpy(codec_info_.plname, "L16");
+    codec_info_.pltype = -1;
+    codec_info_.plfreq = 48000;
+    codec_info_.pacsize = 480;
+    codec_info_.channels = 1;
+    codec_info_.rate = 768000;
+    _codecId = kCodecL16_48Khz;
+  }
 
-    // Readsize for 10ms of audio data (2 bytes per sample).
-    _readSizeBytes = 2 * codec_info_. plfreq / 100;
-    if(_startPointInMs > 0)
-    {
-        while (_playoutPositionMs < _startPointInMs)
-        {
-            read_len = pcm.Read(dummy, _readSizeBytes);
-            if(read_len != static_cast<int>(_readSizeBytes))
-            {
-                return -1;  // Must have reached EOF before start position!
-            }
-            _playoutPositionMs += 10;
-        }
+  // Readsize for 10ms of audio data (2 bytes per sample).
+  _readSizeBytes = 2 * codec_info_.plfreq / 100;
+  if (_startPointInMs > 0) {
+    while (_playoutPositionMs < _startPointInMs) {
+      read_len = pcm.Read(dummy, _readSizeBytes);
+      if (read_len != static_cast<int>(_readSizeBytes)) {
+        return -1;  // Must have reached EOF before start position!
+      }
+      _playoutPositionMs += 10;
     }
-    _reading = true;
-    return 0;
+  }
+  _reading = true;
+  return 0;
 }
 
 int32_t ModuleFileUtility::ReadPCMData(InStream& pcm,
                                        int8_t* outData,
-                                       size_t bufferSize)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::ReadPCMData(pcm= " << &pcm
-                    << ", outData= " << static_cast<void*>(outData)
-                    << ", bufSize= " << bufferSize << ")";
+                                       size_t bufferSize) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::ReadPCMData(pcm= " << &pcm
+                  << ", outData= " << static_cast<void*>(outData)
+                  << ", bufSize= " << bufferSize << ")";
 
-    if(outData == NULL)
-    {
-        LOG(LS_ERROR) << "buffer NULL";
-    }
+  if (outData == NULL) {
+    LOG(LS_ERROR) << "buffer NULL";
+  }
 
-    // Readsize for 10ms of audio data (2 bytes per sample).
-    size_t bytesRequested = static_cast<size_t>(2 * codec_info_.plfreq / 100);
-    if(bufferSize <  bytesRequested)
-    {
-        LOG(LS_ERROR)
-            << "ReadPCMData: buffer not long enough for a 10ms frame.";
-        assert(false);
+  // Readsize for 10ms of audio data (2 bytes per sample).
+  size_t bytesRequested = static_cast<size_t>(2 * codec_info_.plfreq / 100);
+  if (bufferSize < bytesRequested) {
+    LOG(LS_ERROR) << "ReadPCMData: buffer not long enough for a 10ms frame.";
+    assert(false);
+    return -1;
+  }
+
+  int bytesRead = pcm.Read(outData, bytesRequested);
+  if (bytesRead < static_cast<int>(bytesRequested)) {
+    if (pcm.Rewind() == -1) {
+      _reading = false;
+    } else {
+      if (InitPCMReading(pcm, _startPointInMs, _stopPointInMs,
+                         codec_info_.plfreq) == -1) {
+        _reading = false;
+      } else {
+        size_t rest = bytesRequested - bytesRead;
+        int len = pcm.Read(&(outData[bytesRead]), rest);
+        if (len == static_cast<int>(rest)) {
+          bytesRead += len;
+        } else {
+          _reading = false;
+        }
+      }
+      if (bytesRead <= 0) {
+        LOG(LS_ERROR) << "ReadPCMData: Failed to rewind audio file.";
         return -1;
+      }
     }
+  }
 
-    int bytesRead = pcm.Read(outData, bytesRequested);
-    if(bytesRead < static_cast<int>(bytesRequested))
-    {
-        if(pcm.Rewind() == -1)
-        {
-            _reading = false;
-        }
-        else
-        {
-            if(InitPCMReading(pcm, _startPointInMs, _stopPointInMs,
-                              codec_info_.plfreq) == -1)
-            {
-                _reading = false;
-            }
-            else
-            {
-                size_t rest = bytesRequested - bytesRead;
-                int len = pcm.Read(&(outData[bytesRead]), rest);
-                if(len == static_cast<int>(rest))
-                {
-                    bytesRead += len;
-                }
-                else
-                {
-                    _reading = false;
-                }
-            }
-            if(bytesRead <= 0)
-            {
-                LOG(LS_ERROR) << "ReadPCMData: Failed to rewind audio file.";
-                return -1;
-            }
-        }
+  if (bytesRead <= 0) {
+    LOG(LS_VERBOSE) << "ReadPCMData: end of file";
+    return -1;
+  }
+  _playoutPositionMs += 10;
+  if (_stopPointInMs && _playoutPositionMs >= _stopPointInMs) {
+    if (!pcm.Rewind()) {
+      if (InitPCMReading(pcm, _startPointInMs, _stopPointInMs,
+                         codec_info_.plfreq) == -1) {
+        _reading = false;
+      }
     }
-
-    if(bytesRead <= 0)
-    {
-        LOG(LS_VERBOSE) << "ReadPCMData: end of file";
-        return -1;
-    }
-    _playoutPositionMs += 10;
-    if(_stopPointInMs && _playoutPositionMs >= _stopPointInMs)
-    {
-        if(!pcm.Rewind())
-        {
-            if(InitPCMReading(pcm, _startPointInMs, _stopPointInMs,
-                              codec_info_.plfreq) == -1)
-            {
-                _reading = false;
-            }
-        }
-    }
-    return bytesRead;
+  }
+  return bytesRead;
 }
 
-int32_t ModuleFileUtility::InitPCMWriting(OutStream& out, uint32_t freq)
-{
+int32_t ModuleFileUtility::InitPCMWriting(OutStream& out, uint32_t freq) {
+  if (freq == 8000) {
+    strcpy(codec_info_.plname, "L16");
+    codec_info_.pltype = -1;
+    codec_info_.plfreq = 8000;
+    codec_info_.pacsize = 160;
+    codec_info_.channels = 1;
+    codec_info_.rate = 128000;
 
-    if(freq == 8000)
-    {
-        strcpy(codec_info_.plname, "L16");
-        codec_info_.pltype   = -1;
-        codec_info_.plfreq   = 8000;
-        codec_info_.pacsize  = 160;
-        codec_info_.channels = 1;
-        codec_info_.rate     = 128000;
+    _codecId = kCodecL16_8Khz;
+  } else if (freq == 16000) {
+    strcpy(codec_info_.plname, "L16");
+    codec_info_.pltype = -1;
+    codec_info_.plfreq = 16000;
+    codec_info_.pacsize = 320;
+    codec_info_.channels = 1;
+    codec_info_.rate = 256000;
 
-        _codecId = kCodecL16_8Khz;
-    }
-    else if(freq == 16000)
-    {
-        strcpy(codec_info_.plname, "L16");
-        codec_info_.pltype   = -1;
-        codec_info_.plfreq   = 16000;
-        codec_info_.pacsize  = 320;
-        codec_info_.channels = 1;
-        codec_info_.rate     = 256000;
+    _codecId = kCodecL16_16kHz;
+  } else if (freq == 32000) {
+    strcpy(codec_info_.plname, "L16");
+    codec_info_.pltype = -1;
+    codec_info_.plfreq = 32000;
+    codec_info_.pacsize = 320;
+    codec_info_.channels = 1;
+    codec_info_.rate = 512000;
 
-        _codecId = kCodecL16_16kHz;
-    }
-    else if(freq == 32000)
-    {
-        strcpy(codec_info_.plname, "L16");
-        codec_info_.pltype   = -1;
-        codec_info_.plfreq   = 32000;
-        codec_info_.pacsize  = 320;
-        codec_info_.channels = 1;
-        codec_info_.rate     = 512000;
+    _codecId = kCodecL16_32Khz;
+  } else if (freq == 48000) {
+    strcpy(codec_info_.plname, "L16");
+    codec_info_.pltype = -1;
+    codec_info_.plfreq = 48000;
+    codec_info_.pacsize = 480;
+    codec_info_.channels = 1;
+    codec_info_.rate = 768000;
 
-        _codecId = kCodecL16_32Khz;
-    }
-    else if(freq == 48000)
-    {
-        strcpy(codec_info_.plname, "L16");
-        codec_info_.pltype   = -1;
-        codec_info_.plfreq   = 48000;
-        codec_info_.pacsize  = 480;
-        codec_info_.channels = 1;
-        codec_info_.rate     = 768000;
-
-        _codecId = kCodecL16_48Khz;
-    }
-    if((_codecId != kCodecL16_8Khz) &&
-       (_codecId != kCodecL16_16kHz) &&
-       (_codecId != kCodecL16_32Khz) &&
-       (_codecId != kCodecL16_48Khz))
-    {
-        LOG(LS_ERROR) << "CodecInst is not 8KHz, 16KHz, 32kHz or 48kHz PCM!";
-        return -1;
-    }
-    _writing = true;
-    _bytesWritten = 0;
-    return 0;
+    _codecId = kCodecL16_48Khz;
+  }
+  if ((_codecId != kCodecL16_8Khz) && (_codecId != kCodecL16_16kHz) &&
+      (_codecId != kCodecL16_32Khz) && (_codecId != kCodecL16_48Khz)) {
+    LOG(LS_ERROR) << "CodecInst is not 8KHz, 16KHz, 32kHz or 48kHz PCM!";
+    return -1;
+  }
+  _writing = true;
+  _bytesWritten = 0;
+  return 0;
 }
 
 int32_t ModuleFileUtility::WritePCMData(OutStream& out,
-                                        const int8_t*  buffer,
-                                        const size_t dataLength)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::WritePCMData(out= " << &out
-                    << ", buf= " << static_cast<const void*>(buffer)
-                    << ", dataLen= " << dataLength << ")";
+                                        const int8_t* buffer,
+                                        const size_t dataLength) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::WritePCMData(out= " << &out
+                  << ", buf= " << static_cast<const void*>(buffer)
+                  << ", dataLen= " << dataLength << ")";
 
-    if(buffer == NULL)
-    {
-        LOG(LS_ERROR) << "buffer NULL";
-    }
+  if (buffer == NULL) {
+    LOG(LS_ERROR) << "buffer NULL";
+  }
 
-    if(!out.Write(buffer, dataLength))
-    {
-        return -1;
-    }
+  if (!out.Write(buffer, dataLength)) {
+    return -1;
+  }
 
-    _bytesWritten += dataLength;
-    return static_cast<int32_t>(dataLength);
+  _bytesWritten += dataLength;
+  return static_cast<int32_t>(dataLength);
 }
 
-int32_t ModuleFileUtility::codec_info(CodecInst& codecInst)
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::codec_info(codecInst= " << &codecInst
-                    << ")";
+int32_t ModuleFileUtility::codec_info(CodecInst& codecInst) {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::codec_info(codecInst= " << &codecInst
+                  << ")";
 
-    if(!_reading && !_writing)
-    {
-        LOG(LS_ERROR) << "CodecInst: not currently reading audio file!";
-        return -1;
-    }
-    memcpy(&codecInst,&codec_info_,sizeof(CodecInst));
-    return 0;
+  if (!_reading && !_writing) {
+    LOG(LS_ERROR) << "CodecInst: not currently reading audio file!";
+    return -1;
+  }
+  memcpy(&codecInst, &codec_info_, sizeof(CodecInst));
+  return 0;
 }
 
-int32_t ModuleFileUtility::set_codec_info(const CodecInst& codecInst)
-{
-
-    _codecId = kCodecNoCodec;
-    if(STR_CASE_CMP(codecInst.plname, "PCMU") == 0)
-    {
-        _codecId = kCodecPcmu;
+int32_t ModuleFileUtility::set_codec_info(const CodecInst& codecInst) {
+  _codecId = kCodecNoCodec;
+  if (STR_CASE_CMP(codecInst.plname, "PCMU") == 0) {
+    _codecId = kCodecPcmu;
+  } else if (STR_CASE_CMP(codecInst.plname, "PCMA") == 0) {
+    _codecId = kCodecPcma;
+  } else if (STR_CASE_CMP(codecInst.plname, "L16") == 0) {
+    if (codecInst.plfreq == 8000) {
+      _codecId = kCodecL16_8Khz;
+    } else if (codecInst.plfreq == 16000) {
+      _codecId = kCodecL16_16kHz;
+    } else if (codecInst.plfreq == 32000) {
+      _codecId = kCodecL16_32Khz;
+    } else if (codecInst.plfreq == 48000) {
+      _codecId = kCodecL16_48Khz;
     }
-    else if(STR_CASE_CMP(codecInst.plname, "PCMA") == 0)
-    {
-        _codecId = kCodecPcma;
-    }
-    else if(STR_CASE_CMP(codecInst.plname, "L16") == 0)
-    {
-        if(codecInst.plfreq == 8000)
-        {
-            _codecId = kCodecL16_8Khz;
-        }
-        else if(codecInst.plfreq == 16000)
-        {
-            _codecId = kCodecL16_16kHz;
-        }
-        else if(codecInst.plfreq == 32000)
-        {
-            _codecId = kCodecL16_32Khz;
-        }
-        else if(codecInst.plfreq == 48000)
-        {
-            _codecId = kCodecL16_48Khz;
-        }
-    }
+  }
 #ifdef WEBRTC_CODEC_ILBC
-    else if(STR_CASE_CMP(codecInst.plname, "ilbc") == 0)
-    {
-        if(codecInst.pacsize == 160)
-        {
-            _codecId = kCodecIlbc20Ms;
-        }
-        else if(codecInst.pacsize == 240)
-        {
-            _codecId = kCodecIlbc30Ms;
-        }
+  else if (STR_CASE_CMP(codecInst.plname, "ilbc") == 0) {
+    if (codecInst.pacsize == 160) {
+      _codecId = kCodecIlbc20Ms;
+    } else if (codecInst.pacsize == 240) {
+      _codecId = kCodecIlbc30Ms;
     }
+  }
 #endif
-#if(defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
-    else if(STR_CASE_CMP(codecInst.plname, "isac") == 0)
-    {
-        if(codecInst.plfreq == 16000)
-        {
-            _codecId = kCodecIsac;
-        }
-        else if(codecInst.plfreq == 32000)
-        {
-            _codecId = kCodecIsacSwb;
-        }
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+  else if (STR_CASE_CMP(codecInst.plname, "isac") == 0) {
+    if (codecInst.plfreq == 16000) {
+      _codecId = kCodecIsac;
+    } else if (codecInst.plfreq == 32000) {
+      _codecId = kCodecIsacSwb;
     }
+  }
 #endif
-    else if(STR_CASE_CMP(codecInst.plname, "G722") == 0)
-    {
-        _codecId = kCodecG722;
-    }
-    if(_codecId == kCodecNoCodec)
-    {
-        return -1;
-    }
-    memcpy(&codec_info_, &codecInst, sizeof(CodecInst));
-    return 0;
+  else if (STR_CASE_CMP(codecInst.plname, "G722") == 0) {
+    _codecId = kCodecG722;
+  }
+  if (_codecId == kCodecNoCodec) {
+    return -1;
+  }
+  memcpy(&codec_info_, &codecInst, sizeof(CodecInst));
+  return 0;
 }
 
 int32_t ModuleFileUtility::FileDurationMs(const char* fileName,
                                           const FileFormats fileFormat,
-                                          const uint32_t freqInHz)
-{
+                                          const uint32_t freqInHz) {
+  if (fileName == NULL) {
+    LOG(LS_ERROR) << "filename NULL";
+    return -1;
+  }
 
-    if(fileName == NULL)
-    {
-        LOG(LS_ERROR) << "filename NULL";
-        return -1;
-    }
-
-    int32_t time_in_ms = -1;
-    struct stat file_size;
-    if(stat(fileName,&file_size) == -1)
-    {
-        LOG(LS_ERROR) << "failed to retrieve file size with stat!";
-        return -1;
-    }
-    FileWrapper* inStreamObj = FileWrapper::Create();
-    if(inStreamObj == NULL)
-    {
-        LOG(LS_INFO) << "failed to create InStream object!";
-        return -1;
-    }
-    if (!inStreamObj->OpenFile(fileName, true)) {
-      delete inStreamObj;
-      LOG(LS_ERROR) << "failed to open file " << fileName << "!";
-      return -1;
-    }
-
-    switch (fileFormat)
-    {
-        case kFileFormatWavFile:
-        {
-            if(ReadWavHeader(*inStreamObj) == -1)
-            {
-                LOG(LS_ERROR) << "failed to read WAV file header!";
-                return -1;
-            }
-            time_in_ms = ((file_size.st_size - 44) /
-                          (_wavFormatObj.nAvgBytesPerSec/1000));
-            break;
-        }
-        case kFileFormatPcm16kHzFile:
-        {
-            // 16 samples per ms. 2 bytes per sample.
-            int32_t denominator = 16*2;
-            time_in_ms = (file_size.st_size)/denominator;
-            break;
-        }
-        case kFileFormatPcm8kHzFile:
-        {
-            // 8 samples per ms. 2 bytes per sample.
-            int32_t denominator = 8*2;
-            time_in_ms = (file_size.st_size)/denominator;
-            break;
-        }
-        case kFileFormatCompressedFile:
-        {
-            int32_t cnt = 0;
-            int read_len = 0;
-            char buf[64];
-            do
-            {
-                read_len = inStreamObj->Read(&buf[cnt++], 1);
-                if(read_len != 1)
-                {
-                    return -1;
-                }
-            } while ((buf[cnt-1] != '\n') && (64 > cnt));
-
-            if(cnt == 64)
-            {
-                return -1;
-            }
-            else
-            {
-                buf[cnt] = 0;
-            }
-#ifdef WEBRTC_CODEC_ILBC
-            if(!strcmp("#!iLBC20\n", buf))
-            {
-                // 20 ms is 304 bits
-                time_in_ms = ((file_size.st_size)*160)/304;
-                break;
-            }
-            if(!strcmp("#!iLBC30\n", buf))
-            {
-                // 30 ms takes 400 bits.
-                // file size in bytes * 8 / 400 is the number of
-                // 30 ms frames in the file ->
-                // time_in_ms = file size * 8 / 400 * 30
-                time_in_ms = ((file_size.st_size)*240)/400;
-                break;
-            }
-#endif
-            break;
-        }
-        case kFileFormatPreencodedFile:
-        {
-            LOG(LS_ERROR) << "cannot determine duration of Pre-Encoded file!";
-            break;
-        }
-        default:
-            LOG(LS_ERROR) << "unsupported file format " << fileFormat << "!";
-            break;
-    }
-    inStreamObj->CloseFile();
+  int32_t time_in_ms = -1;
+  struct stat file_size;
+  if (stat(fileName, &file_size) == -1) {
+    LOG(LS_ERROR) << "failed to retrieve file size with stat!";
+    return -1;
+  }
+  FileWrapper* inStreamObj = FileWrapper::Create();
+  if (inStreamObj == NULL) {
+    LOG(LS_INFO) << "failed to create InStream object!";
+    return -1;
+  }
+  if (!inStreamObj->OpenFile(fileName, true)) {
     delete inStreamObj;
-    return time_in_ms;
+    LOG(LS_ERROR) << "failed to open file " << fileName << "!";
+    return -1;
+  }
+
+  switch (fileFormat) {
+    case kFileFormatWavFile: {
+      if (ReadWavHeader(*inStreamObj) == -1) {
+        LOG(LS_ERROR) << "failed to read WAV file header!";
+        return -1;
+      }
+      time_in_ms =
+          ((file_size.st_size - 44) / (_wavFormatObj.nAvgBytesPerSec / 1000));
+      break;
+    }
+    case kFileFormatPcm16kHzFile: {
+      // 16 samples per ms. 2 bytes per sample.
+      int32_t denominator = 16 * 2;
+      time_in_ms = (file_size.st_size) / denominator;
+      break;
+    }
+    case kFileFormatPcm8kHzFile: {
+      // 8 samples per ms. 2 bytes per sample.
+      int32_t denominator = 8 * 2;
+      time_in_ms = (file_size.st_size) / denominator;
+      break;
+    }
+    case kFileFormatCompressedFile: {
+      int32_t cnt = 0;
+      int read_len = 0;
+      char buf[64];
+      do {
+        read_len = inStreamObj->Read(&buf[cnt++], 1);
+        if (read_len != 1) {
+          return -1;
+        }
+      } while ((buf[cnt - 1] != '\n') && (64 > cnt));
+
+      if (cnt == 64) {
+        return -1;
+      } else {
+        buf[cnt] = 0;
+      }
+#ifdef WEBRTC_CODEC_ILBC
+      if (!strcmp("#!iLBC20\n", buf)) {
+        // 20 ms is 304 bits
+        time_in_ms = ((file_size.st_size) * 160) / 304;
+        break;
+      }
+      if (!strcmp("#!iLBC30\n", buf)) {
+        // 30 ms takes 400 bits.
+        // file size in bytes * 8 / 400 is the number of
+        // 30 ms frames in the file ->
+        // time_in_ms = file size * 8 / 400 * 30
+        time_in_ms = ((file_size.st_size) * 240) / 400;
+        break;
+      }
+#endif
+      break;
+    }
+    case kFileFormatPreencodedFile: {
+      LOG(LS_ERROR) << "cannot determine duration of Pre-Encoded file!";
+      break;
+    }
+    default:
+      LOG(LS_ERROR) << "unsupported file format " << fileFormat << "!";
+      break;
+  }
+  inStreamObj->CloseFile();
+  delete inStreamObj;
+  return time_in_ms;
 }
 
-uint32_t ModuleFileUtility::PlayoutPositionMs()
-{
-    LOG(LS_VERBOSE) << "ModuleFileUtility::PlayoutPosition()";
+uint32_t ModuleFileUtility::PlayoutPositionMs() {
+  LOG(LS_VERBOSE) << "ModuleFileUtility::PlayoutPosition()";
 
-    return _reading ? _playoutPositionMs : 0;
+  return _reading ? _playoutPositionMs : 0;
 }
 }  // namespace webrtc
diff --git a/modules/video_capture/device_info_impl.cc b/modules/video_capture/device_info_impl.cc
index 03c6e1d..355e6a2 100644
--- a/modules/video_capture/device_info_impl.cc
+++ b/modules/video_capture/device_info_impl.cc
@@ -16,248 +16,231 @@
 #include "rtc_base/logging.h"
 
 #ifndef abs
-#define abs(a) (a>=0?a:-a)
+#define abs(a) (a >= 0 ? a : -a)
 #endif
 
-namespace webrtc
-{
-namespace videocapturemodule
-{
+namespace webrtc {
+namespace videocapturemodule {
 DeviceInfoImpl::DeviceInfoImpl()
-    : _apiLock(*RWLockWrapper::CreateRWLock()), _lastUsedDeviceName(NULL),
-      _lastUsedDeviceNameLength(0)
-{
+    : _apiLock(*RWLockWrapper::CreateRWLock()),
+      _lastUsedDeviceName(NULL),
+      _lastUsedDeviceNameLength(0) {}
+
+DeviceInfoImpl::~DeviceInfoImpl(void) {
+  _apiLock.AcquireLockExclusive();
+  free(_lastUsedDeviceName);
+  _apiLock.ReleaseLockExclusive();
+
+  delete &_apiLock;
 }
+int32_t DeviceInfoImpl::NumberOfCapabilities(const char* deviceUniqueIdUTF8) {
+  if (!deviceUniqueIdUTF8)
+    return -1;
 
-DeviceInfoImpl::~DeviceInfoImpl(void)
-{
-    _apiLock.AcquireLockExclusive();
-    free(_lastUsedDeviceName);
-    _apiLock.ReleaseLockExclusive();
+  _apiLock.AcquireLockShared();
 
-    delete &_apiLock;
-}
-int32_t DeviceInfoImpl::NumberOfCapabilities(
-                                        const char* deviceUniqueIdUTF8)
-{
-
-    if (!deviceUniqueIdUTF8)
-        return -1;
-
-    _apiLock.AcquireLockShared();
-
-    if (_lastUsedDeviceNameLength == strlen((char*) deviceUniqueIdUTF8))
-    {
-        // Is it the same device that is asked for again.
+  if (_lastUsedDeviceNameLength == strlen((char*)deviceUniqueIdUTF8)) {
+// Is it the same device that is asked for again.
 #if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
-        if(strncasecmp((char*)_lastUsedDeviceName,
-                       (char*) deviceUniqueIdUTF8,
-                       _lastUsedDeviceNameLength)==0)
+    if (strncasecmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
+                    _lastUsedDeviceNameLength) == 0)
 #else
-        if (_strnicmp((char*) _lastUsedDeviceName,
-                      (char*) deviceUniqueIdUTF8,
-                      _lastUsedDeviceNameLength) == 0)
+    if (_strnicmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
+                  _lastUsedDeviceNameLength) == 0)
 #endif
-        {
-            //yes
-            _apiLock.ReleaseLockShared();
-            return static_cast<int32_t>(_captureCapabilities.size());
-        }
+    {
+      // yes
+      _apiLock.ReleaseLockShared();
+      return static_cast<int32_t>(_captureCapabilities.size());
     }
-    // Need to get exclusive rights to create the new capability map.
-    _apiLock.ReleaseLockShared();
-    WriteLockScoped cs2(_apiLock);
+  }
+  // Need to get exclusive rights to create the new capability map.
+  _apiLock.ReleaseLockShared();
+  WriteLockScoped cs2(_apiLock);
 
-    int32_t ret = CreateCapabilityMap(deviceUniqueIdUTF8);
-    return ret;
+  int32_t ret = CreateCapabilityMap(deviceUniqueIdUTF8);
+  return ret;
 }
 
 int32_t DeviceInfoImpl::GetCapability(const char* deviceUniqueIdUTF8,
                                       const uint32_t deviceCapabilityNumber,
-                                      VideoCaptureCapability& capability)
-{
-    assert(deviceUniqueIdUTF8 != NULL);
+                                      VideoCaptureCapability& capability) {
+  assert(deviceUniqueIdUTF8 != NULL);
 
-    ReadLockScoped cs(_apiLock);
+  ReadLockScoped cs(_apiLock);
 
-    if ((_lastUsedDeviceNameLength != strlen((char*) deviceUniqueIdUTF8))
+  if ((_lastUsedDeviceNameLength != strlen((char*)deviceUniqueIdUTF8))
 #if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
-        || (strncasecmp((char*)_lastUsedDeviceName,
-                        (char*) deviceUniqueIdUTF8,
-                        _lastUsedDeviceNameLength)!=0))
-#else
-        || (_strnicmp((char*) _lastUsedDeviceName,
-                      (char*) deviceUniqueIdUTF8,
+      || (strncasecmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) != 0))
+#else
+      || (_strnicmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
+                    _lastUsedDeviceNameLength) != 0))
 #endif
 
-    {
-        _apiLock.ReleaseLockShared();
-        _apiLock.AcquireLockExclusive();
-        if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8))
-        {
-            _apiLock.ReleaseLockExclusive();
-            _apiLock.AcquireLockShared();
-            return -1;
-        }
-        _apiLock.ReleaseLockExclusive();
-        _apiLock.AcquireLockShared();
+  {
+    _apiLock.ReleaseLockShared();
+    _apiLock.AcquireLockExclusive();
+    if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8)) {
+      _apiLock.ReleaseLockExclusive();
+      _apiLock.AcquireLockShared();
+      return -1;
     }
+    _apiLock.ReleaseLockExclusive();
+    _apiLock.AcquireLockShared();
+  }
 
-    // Make sure the number is valid
-    if (deviceCapabilityNumber >= (unsigned int) _captureCapabilities.size())
-    {
-        LOG(LS_ERROR) << "Invalid deviceCapabilityNumber "
-                      << deviceCapabilityNumber << ">= number of capabilities ("
-                      << _captureCapabilities.size() << ").";
-        return -1;
-    }
+  // Make sure the number is valid
+  if (deviceCapabilityNumber >= (unsigned int)_captureCapabilities.size()) {
+    LOG(LS_ERROR) << "Invalid deviceCapabilityNumber " << deviceCapabilityNumber
+                  << ">= number of capabilities ("
+                  << _captureCapabilities.size() << ").";
+    return -1;
+  }
 
-    capability = _captureCapabilities[deviceCapabilityNumber];
-    return 0;
+  capability = _captureCapabilities[deviceCapabilityNumber];
+  return 0;
 }
 
 int32_t DeviceInfoImpl::GetBestMatchedCapability(
-                                        const char*deviceUniqueIdUTF8,
-                                        const VideoCaptureCapability& requested,
-                                        VideoCaptureCapability& resulting)
-{
+    const char* deviceUniqueIdUTF8,
+    const VideoCaptureCapability& requested,
+    VideoCaptureCapability& resulting) {
+  if (!deviceUniqueIdUTF8)
+    return -1;
 
-
-    if (!deviceUniqueIdUTF8)
-        return -1;
-
-    ReadLockScoped cs(_apiLock);
-    if ((_lastUsedDeviceNameLength != strlen((char*) deviceUniqueIdUTF8))
+  ReadLockScoped cs(_apiLock);
+  if ((_lastUsedDeviceNameLength != strlen((char*)deviceUniqueIdUTF8))
 #if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
-        || (strncasecmp((char*)_lastUsedDeviceName,
-                        (char*) deviceUniqueIdUTF8,
-                        _lastUsedDeviceNameLength)!=0))
-#else
-        || (_strnicmp((char*) _lastUsedDeviceName,
-                      (char*) deviceUniqueIdUTF8,
+      || (strncasecmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) != 0))
+#else
+      || (_strnicmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
+                    _lastUsedDeviceNameLength) != 0))
 #endif
-    {
-        _apiLock.ReleaseLockShared();
-        _apiLock.AcquireLockExclusive();
-        if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8))
-        {
-            return -1;
-        }
-        _apiLock.ReleaseLockExclusive();
-        _apiLock.AcquireLockShared();
+  {
+    _apiLock.ReleaseLockShared();
+    _apiLock.AcquireLockExclusive();
+    if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8)) {
+      return -1;
     }
+    _apiLock.ReleaseLockExclusive();
+    _apiLock.AcquireLockShared();
+  }
 
-    int32_t bestformatIndex = -1;
-    int32_t bestWidth = 0;
-    int32_t bestHeight = 0;
-    int32_t bestFrameRate = 0;
-    VideoType bestVideoType = VideoType::kUnknown;
+  int32_t bestformatIndex = -1;
+  int32_t bestWidth = 0;
+  int32_t bestHeight = 0;
+  int32_t bestFrameRate = 0;
+  VideoType bestVideoType = VideoType::kUnknown;
 
-    const int32_t numberOfCapabilies =
-        static_cast<int32_t>(_captureCapabilities.size());
+  const int32_t numberOfCapabilies =
+      static_cast<int32_t>(_captureCapabilities.size());
 
-    for (int32_t tmp = 0; tmp < numberOfCapabilies; ++tmp) // Loop through all capabilities
-    {
-        VideoCaptureCapability& capability = _captureCapabilities[tmp];
+  for (int32_t tmp = 0; tmp < numberOfCapabilies;
+       ++tmp)  // Loop through all capabilities
+  {
+    VideoCaptureCapability& capability = _captureCapabilities[tmp];
 
-        const int32_t diffWidth = capability.width - requested.width;
-        const int32_t diffHeight = capability.height - requested.height;
-        const int32_t diffFrameRate = capability.maxFPS - requested.maxFPS;
+    const int32_t diffWidth = capability.width - requested.width;
+    const int32_t diffHeight = capability.height - requested.height;
+    const int32_t diffFrameRate = capability.maxFPS - requested.maxFPS;
 
-        const int32_t currentbestDiffWith = bestWidth - requested.width;
-        const int32_t currentbestDiffHeight = bestHeight - requested.height;
-        const int32_t currentbestDiffFrameRate = bestFrameRate - requested.maxFPS;
+    const int32_t currentbestDiffWith = bestWidth - requested.width;
+    const int32_t currentbestDiffHeight = bestHeight - requested.height;
+    const int32_t currentbestDiffFrameRate = bestFrameRate - requested.maxFPS;
 
-        if ((diffHeight >= 0 && diffHeight <= abs(currentbestDiffHeight)) // Height better or equalt that previouse.
-            || (currentbestDiffHeight < 0 && diffHeight >= currentbestDiffHeight))
-        {
-
-            if (diffHeight == currentbestDiffHeight) // Found best height. Care about the width)
-            {
-                if ((diffWidth >= 0 && diffWidth <= abs(currentbestDiffWith)) // Width better or equal
-                    || (currentbestDiffWith < 0 && diffWidth >= currentbestDiffWith))
-                {
-                    if (diffWidth == currentbestDiffWith && diffHeight
-                        == currentbestDiffHeight) // Same size as previously
-                    {
-                        //Also check the best frame rate if the diff is the same as previouse
-                        if (((diffFrameRate >= 0 &&
-                              diffFrameRate <= currentbestDiffFrameRate) // Frame rate to high but better match than previouse and we have not selected IUV
-                            ||
-                            (currentbestDiffFrameRate < 0 &&
-                             diffFrameRate >= currentbestDiffFrameRate)) // Current frame rate is lower than requested. This is better.
-                        )
-                        {
-                            if ((currentbestDiffFrameRate == diffFrameRate) // Same frame rate as previous  or frame rate allready good enough
-                                || (currentbestDiffFrameRate >= 0))
-                            {
-                              if (bestVideoType != requested.videoType &&
-                                  requested.videoType != VideoType::kUnknown &&
-                                  (capability.videoType ==
-                                       requested.videoType ||
-                                   capability.videoType == VideoType::kI420 ||
-                                   capability.videoType == VideoType::kYUY2 ||
-                                   capability.videoType == VideoType::kYV12)) {
-                                bestVideoType = capability.videoType;
-                                bestformatIndex = tmp;
-                                }
-                                // If width height and frame rate is full filled we can use the camera for encoding if it is supported.
-                                if (capability.height == requested.height
-                                    && capability.width == requested.width
-                                    && capability.maxFPS >= requested.maxFPS)
-                                {
-                                  bestformatIndex = tmp;
-                                }
-                            }
-                            else // Better frame rate
-                            {
-                                bestWidth = capability.width;
-                                bestHeight = capability.height;
-                                bestFrameRate = capability.maxFPS;
-                                bestVideoType = capability.videoType;
-                                bestformatIndex = tmp;
-                            }
-                        }
-                    }
-                    else // Better width than previously
-                    {
-                        bestWidth = capability.width;
-                        bestHeight = capability.height;
-                        bestFrameRate = capability.maxFPS;
-                        bestVideoType = capability.videoType;
-                        bestformatIndex = tmp;
-                    }
-                }// else width no good
-            }
-            else // Better height
-            {
+    if ((diffHeight >= 0 &&
+         diffHeight <= abs(currentbestDiffHeight))  // Height better or equal
+                                                    // to the previous one.
+        || (currentbestDiffHeight < 0 && diffHeight >= currentbestDiffHeight)) {
+      if (diffHeight ==
+          currentbestDiffHeight)  // Found best height. Care about the width.
+      {
+        if ((diffWidth >= 0 &&
+             diffWidth <= abs(currentbestDiffWith))  // Width better or equal
+            || (currentbestDiffWith < 0 && diffWidth >= currentbestDiffWith)) {
+          if (diffWidth == currentbestDiffWith &&
+              diffHeight == currentbestDiffHeight)  // Same size as previously
+          {
+            // Also check the best frame rate if the diff is the same as
+            // the previous one.
+            if (((diffFrameRate >= 0 &&
+                  diffFrameRate <=
+                      currentbestDiffFrameRate)  // Frame rate too high but
+                                                 // better match than previous
+                                                 // and we have not selected IUV
+                 || (currentbestDiffFrameRate < 0 &&
+                     diffFrameRate >=
+                         currentbestDiffFrameRate))  // Current frame rate is
+                                                     // lower than requested.
+                                                     // This is better.
+                ) {
+              if ((currentbestDiffFrameRate ==
+                   diffFrameRate)  // Same frame rate as previous, or frame rate
+                                   // already good enough
+                  || (currentbestDiffFrameRate >= 0)) {
+                if (bestVideoType != requested.videoType &&
+                    requested.videoType != VideoType::kUnknown &&
+                    (capability.videoType == requested.videoType ||
+                     capability.videoType == VideoType::kI420 ||
+                     capability.videoType == VideoType::kYUY2 ||
+                     capability.videoType == VideoType::kYV12)) {
+                  bestVideoType = capability.videoType;
+                  bestformatIndex = tmp;
+                }
+                // If width, height and frame rate are fulfilled we can use the
+                // camera for encoding if it is supported.
+                if (capability.height == requested.height &&
+                    capability.width == requested.width &&
+                    capability.maxFPS >= requested.maxFPS) {
+                  bestformatIndex = tmp;
+                }
+              } else  // Better frame rate
+              {
                 bestWidth = capability.width;
                 bestHeight = capability.height;
                 bestFrameRate = capability.maxFPS;
                 bestVideoType = capability.videoType;
                 bestformatIndex = tmp;
+              }
             }
-        }// else height not good
-    }//end for
+          } else  // Better width than previously
+          {
+            bestWidth = capability.width;
+            bestHeight = capability.height;
+            bestFrameRate = capability.maxFPS;
+            bestVideoType = capability.videoType;
+            bestformatIndex = tmp;
+          }
+        }     // else width no good
+      } else  // Better height
+      {
+        bestWidth = capability.width;
+        bestHeight = capability.height;
+        bestFrameRate = capability.maxFPS;
+        bestVideoType = capability.videoType;
+        bestformatIndex = tmp;
+      }
+    }  // else height not good
+  }    // end for
 
-    LOG(LS_VERBOSE) << "Best camera format: " << bestWidth << "x" << bestHeight
-                    << "@" << bestFrameRate
-                    << "fps, color format: " << static_cast<int>(bestVideoType);
+  LOG(LS_VERBOSE) << "Best camera format: " << bestWidth << "x" << bestHeight
+                  << "@" << bestFrameRate
+                  << "fps, color format: " << static_cast<int>(bestVideoType);
 
-    // Copy the capability
-    if (bestformatIndex < 0)
-        return -1;
-    resulting = _captureCapabilities[bestformatIndex];
-    return bestformatIndex;
+  // Copy the capability
+  if (bestformatIndex < 0)
+    return -1;
+  resulting = _captureCapabilities[bestformatIndex];
+  return bestformatIndex;
 }
 
-//Default implementation. This should be overridden by Mobile implementations.
+// Default implementation. This should be overridden by Mobile implementations.
 int32_t DeviceInfoImpl::GetOrientation(const char* deviceUniqueIdUTF8,
                                        VideoRotation& orientation) {
   orientation = kVideoRotation_0;
-    return -1;
+  return -1;
 }
 }  // namespace videocapturemodule
 }  // namespace webrtc
diff --git a/modules/video_capture/linux/device_info_linux.cc b/modules/video_capture/linux/device_info_linux.cc
index 719a637..b5ad58c 100644
--- a/modules/video_capture/linux/device_info_linux.cc
+++ b/modules/video_capture/linux/device_info_linux.cc
@@ -17,304 +17,250 @@
 #include <sys/ioctl.h>
 #include <sys/stat.h>
 #include <unistd.h>
-//v4l includes
+// v4l includes
 #include <linux/videodev2.h>
 
 #include "rtc_base/logging.h"
 
-
-namespace webrtc
-{
-namespace videocapturemodule
-{
-VideoCaptureModule::DeviceInfo*
-VideoCaptureImpl::CreateDeviceInfo()
-{
-    return new videocapturemodule::DeviceInfoLinux();
+namespace webrtc {
+namespace videocapturemodule {
+VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo() {
+  return new videocapturemodule::DeviceInfoLinux();
 }
 
-DeviceInfoLinux::DeviceInfoLinux()
-    : DeviceInfoImpl()
-{
+DeviceInfoLinux::DeviceInfoLinux() : DeviceInfoImpl() {}
+
+int32_t DeviceInfoLinux::Init() {
+  return 0;
 }
 
-int32_t DeviceInfoLinux::Init()
-{
-    return 0;
-}
+DeviceInfoLinux::~DeviceInfoLinux() {}
 
-DeviceInfoLinux::~DeviceInfoLinux()
-{
-}
+uint32_t DeviceInfoLinux::NumberOfDevices() {
+  LOG(LS_INFO) << __FUNCTION__;
 
-uint32_t DeviceInfoLinux::NumberOfDevices()
-{
-    LOG(LS_INFO) << __FUNCTION__;
+  uint32_t count = 0;
+  char device[20];
+  int fd = -1;
 
-    uint32_t count = 0;
-    char device[20];
-    int fd = -1;
-
-    /* detect /dev/video [0-63]VideoCaptureModule entries */
-    for (int n = 0; n < 64; n++)
-    {
-        sprintf(device, "/dev/video%d", n);
-        if ((fd = open(device, O_RDONLY)) != -1)
-        {
-            close(fd);
-            count++;
-        }
+  /* detect /dev/video [0-63]VideoCaptureModule entries */
+  for (int n = 0; n < 64; n++) {
+    sprintf(device, "/dev/video%d", n);
+    if ((fd = open(device, O_RDONLY)) != -1) {
+      close(fd);
+      count++;
     }
+  }
 
-    return count;
+  return count;
 }
 
-int32_t DeviceInfoLinux::GetDeviceName(
-                                         uint32_t deviceNumber,
-                                         char* deviceNameUTF8,
-                                         uint32_t deviceNameLength,
-                                         char* deviceUniqueIdUTF8,
-                                         uint32_t deviceUniqueIdUTF8Length,
-                                         char* /*productUniqueIdUTF8*/,
-                                         uint32_t /*productUniqueIdUTF8Length*/)
-{
-    LOG(LS_INFO) << __FUNCTION__;
+int32_t DeviceInfoLinux::GetDeviceName(uint32_t deviceNumber,
+                                       char* deviceNameUTF8,
+                                       uint32_t deviceNameLength,
+                                       char* deviceUniqueIdUTF8,
+                                       uint32_t deviceUniqueIdUTF8Length,
+                                       char* /*productUniqueIdUTF8*/,
+                                       uint32_t /*productUniqueIdUTF8Length*/) {
+  LOG(LS_INFO) << __FUNCTION__;
 
-    // Travel through /dev/video [0-63]
-    uint32_t count = 0;
-    char device[20];
-    int fd = -1;
-    bool found = false;
-    for (int n = 0; n < 64; n++)
-    {
-        sprintf(device, "/dev/video%d", n);
-        if ((fd = open(device, O_RDONLY)) != -1)
-        {
-            if (count == deviceNumber) {
-                // Found the device
-                found = true;
-                break;
-            } else {
-                close(fd);
-                count++;
-            }
-        }
+  // Travel through /dev/video [0-63]
+  uint32_t count = 0;
+  char device[20];
+  int fd = -1;
+  bool found = false;
+  for (int n = 0; n < 64; n++) {
+    sprintf(device, "/dev/video%d", n);
+    if ((fd = open(device, O_RDONLY)) != -1) {
+      if (count == deviceNumber) {
+        // Found the device
+        found = true;
+        break;
+      } else {
+        close(fd);
+        count++;
+      }
     }
+  }
 
-    if (!found)
-        return -1;
+  if (!found)
+    return -1;
+
+  // query device capabilities
+  struct v4l2_capability cap;
+  if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
+    LOG(LS_INFO) << "error in querying the device capability for device "
+                 << device << ". errno = " << errno;
+    close(fd);
+    return -1;
+  }
+
+  close(fd);
+
+  char cameraName[64];
+  memset(deviceNameUTF8, 0, deviceNameLength);
+  memcpy(cameraName, cap.card, sizeof(cap.card));
+
+  if (deviceNameLength >= strlen(cameraName)) {
+    memcpy(deviceNameUTF8, cameraName, strlen(cameraName));
+  } else {
+    LOG(LS_INFO) << "buffer passed is too small";
+    return -1;
+  }
+
+  if (cap.bus_info[0] != 0)  // may not be available in all drivers
+  {
+    // copy device id
+    if (deviceUniqueIdUTF8Length >= strlen((const char*)cap.bus_info)) {
+      memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Length);
+      memcpy(deviceUniqueIdUTF8, cap.bus_info,
+             strlen((const char*)cap.bus_info));
+    } else {
+      LOG(LS_INFO) << "buffer passed is too small";
+      return -1;
+    }
+  }
+
+  return 0;
+}
+
+int32_t DeviceInfoLinux::CreateCapabilityMap(const char* deviceUniqueIdUTF8) {
+  int fd;
+  char device[32];
+  bool found = false;
+
+  const int32_t deviceUniqueIdUTF8Length =
+      (int32_t)strlen((char*)deviceUniqueIdUTF8);
+  if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) {
+    LOG(LS_INFO) << "Device name too long";
+    return -1;
+  }
+  LOG(LS_INFO) << "CreateCapabilityMap called for device "
+               << deviceUniqueIdUTF8;
+
+  /* detect /dev/video [0-63] entries */
+  for (int n = 0; n < 64; ++n) {
+    sprintf(device, "/dev/video%d", n);
+    fd = open(device, O_RDONLY);
+    if (fd == -1)
+      continue;
 
     // query device capabilities
     struct v4l2_capability cap;
-    if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0)
-    {
-        LOG(LS_INFO) << "error in querying the device capability for device "
-                     << device << ". errno = " << errno;
-        close(fd);
-        return -1;
-    }
-
-    close(fd);
-
-    char cameraName[64];
-    memset(deviceNameUTF8, 0, deviceNameLength);
-    memcpy(cameraName, cap.card, sizeof(cap.card));
-
-    if (deviceNameLength >= strlen(cameraName))
-    {
-        memcpy(deviceNameUTF8, cameraName, strlen(cameraName));
-    }
-    else
-    {
-        LOG(LS_INFO) << "buffer passed is too small";
-        return -1;
-    }
-
-    if (cap.bus_info[0] != 0) // may not available in all drivers
-    {
-        // copy device id
-        if (deviceUniqueIdUTF8Length >= strlen((const char*) cap.bus_info))
+    if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
+      if (cap.bus_info[0] != 0) {
+        if (strncmp((const char*)cap.bus_info, (const char*)deviceUniqueIdUTF8,
+                    strlen((const char*)deviceUniqueIdUTF8)) ==
+            0)  // match with device id
         {
-            memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Length);
-            memcpy(deviceUniqueIdUTF8, cap.bus_info,
-                   strlen((const char*) cap.bus_info));
+          found = true;
+          break;  // fd matches with device unique id supplied
         }
-        else
-        {
-            LOG(LS_INFO) << "buffer passed is too small";
-            return -1;
+      } else  // match for device name
+      {
+        if (IsDeviceNameMatches((const char*)cap.card,
+                                (const char*)deviceUniqueIdUTF8)) {
+          found = true;
+          break;
         }
+      }
     }
+    close(fd);  // close since this is not the matching device
+  }
 
-    return 0;
-}
+  if (!found) {
+    LOG(LS_INFO) << "no matching device found";
+    return -1;
+  }
 
-int32_t DeviceInfoLinux::CreateCapabilityMap(
-                                        const char* deviceUniqueIdUTF8)
-{
-    int fd;
-    char device[32];
-    bool found = false;
+  // now fd will point to the matching device
+  // reset old capability list.
+  _captureCapabilities.clear();
 
-    const int32_t deviceUniqueIdUTF8Length =
-                            (int32_t) strlen((char*) deviceUniqueIdUTF8);
-    if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength)
-    {
-        LOG(LS_INFO) << "Device name too long";
-        return -1;
-    }
-    LOG(LS_INFO) << "CreateCapabilityMap called for device "
-                 << deviceUniqueIdUTF8;
+  int size = FillCapabilities(fd);
+  close(fd);
 
-    /* detect /dev/video [0-63] entries */
-    for (int n = 0; n < 64; ++n)
-    {
-        sprintf(device, "/dev/video%d", n);
-        fd = open(device, O_RDONLY);
-        if (fd == -1)
-          continue;
+  // Store the new used device name
+  _lastUsedDeviceNameLength = deviceUniqueIdUTF8Length;
+  _lastUsedDeviceName =
+      (char*)realloc(_lastUsedDeviceName, _lastUsedDeviceNameLength + 1);
+  memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8,
+         _lastUsedDeviceNameLength + 1);
 
-        // query device capabilities
-        struct v4l2_capability cap;
-        if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
-        {
-            if (cap.bus_info[0] != 0)
-            {
-                if (strncmp((const char*) cap.bus_info,
-                            (const char*) deviceUniqueIdUTF8,
-                            strlen((const char*) deviceUniqueIdUTF8)) == 0) //match with device id
-                {
-                    found = true;
-                    break; // fd matches with device unique id supplied
-                }
-            }
-            else //match for device name
-            {
-                if (IsDeviceNameMatches((const char*) cap.card,
-                                        (const char*) deviceUniqueIdUTF8))
-                {
-                    found = true;
-                    break;
-                }
-            }
-        }
-        close(fd); // close since this is not the matching device
-    }
+  LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
 
-    if (!found)
-    {
-        LOG(LS_INFO) << "no matching device found";
-        return -1;
-    }
-
-    // now fd will point to the matching device
-    // reset old capability list.
-    _captureCapabilities.clear();
-
-    int size = FillCapabilities(fd);
-    close(fd);
-
-    // Store the new used device name
-    _lastUsedDeviceNameLength = deviceUniqueIdUTF8Length;
-    _lastUsedDeviceName = (char*) realloc(_lastUsedDeviceName,
-                                                   _lastUsedDeviceNameLength + 1);
-    memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8, _lastUsedDeviceNameLength + 1);
-
-    LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
-
-    return size;
+  return size;
 }
 
 bool DeviceInfoLinux::IsDeviceNameMatches(const char* name,
-                                          const char* deviceUniqueIdUTF8)
-{
-    if (strncmp(deviceUniqueIdUTF8, name, strlen(name)) == 0)
-            return true;
-    return false;
+                                          const char* deviceUniqueIdUTF8) {
+  if (strncmp(deviceUniqueIdUTF8, name, strlen(name)) == 0)
+    return true;
+  return false;
 }
 
-int32_t DeviceInfoLinux::FillCapabilities(int fd)
-{
+int32_t DeviceInfoLinux::FillCapabilities(int fd) {
+  // set image format
+  struct v4l2_format video_fmt;
+  memset(&video_fmt, 0, sizeof(struct v4l2_format));
 
-    // set image format
-    struct v4l2_format video_fmt;
-    memset(&video_fmt, 0, sizeof(struct v4l2_format));
+  video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+  video_fmt.fmt.pix.sizeimage = 0;
 
-    video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-    video_fmt.fmt.pix.sizeimage = 0;
+  int totalFmts = 4;
+  unsigned int videoFormats[] = {V4L2_PIX_FMT_MJPEG, V4L2_PIX_FMT_YUV420,
+                                 V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_UYVY};
 
-    int totalFmts = 4;
-    unsigned int videoFormats[] = {
-        V4L2_PIX_FMT_MJPEG,
-        V4L2_PIX_FMT_YUV420,
-        V4L2_PIX_FMT_YUYV,
-        V4L2_PIX_FMT_UYVY };
+  int sizes = 13;
+  unsigned int size[][2] = {{128, 96},   {160, 120},  {176, 144},  {320, 240},
+                            {352, 288},  {640, 480},  {704, 576},  {800, 600},
+                            {960, 720},  {1280, 720}, {1024, 768}, {1440, 1080},
+                            {1920, 1080}};
 
-    int sizes = 13;
-    unsigned int size[][2] = { { 128, 96 }, { 160, 120 }, { 176, 144 },
-                               { 320, 240 }, { 352, 288 }, { 640, 480 },
-                               { 704, 576 }, { 800, 600 }, { 960, 720 },
-                               { 1280, 720 }, { 1024, 768 }, { 1440, 1080 },
-                               { 1920, 1080 } };
+  int index = 0;
+  for (int fmts = 0; fmts < totalFmts; fmts++) {
+    for (int i = 0; i < sizes; i++) {
+      video_fmt.fmt.pix.pixelformat = videoFormats[fmts];
+      video_fmt.fmt.pix.width = size[i][0];
+      video_fmt.fmt.pix.height = size[i][1];
 
-    int index = 0;
-    for (int fmts = 0; fmts < totalFmts; fmts++)
-    {
-        for (int i = 0; i < sizes; i++)
-        {
-            video_fmt.fmt.pix.pixelformat = videoFormats[fmts];
-            video_fmt.fmt.pix.width = size[i][0];
-            video_fmt.fmt.pix.height = size[i][1];
+      if (ioctl(fd, VIDIOC_TRY_FMT, &video_fmt) >= 0) {
+        if ((video_fmt.fmt.pix.width == size[i][0]) &&
+            (video_fmt.fmt.pix.height == size[i][1])) {
+          VideoCaptureCapability cap;
+          cap.width = video_fmt.fmt.pix.width;
+          cap.height = video_fmt.fmt.pix.height;
+          if (videoFormats[fmts] == V4L2_PIX_FMT_YUYV) {
+            cap.videoType = VideoType::kYUY2;
+          } else if (videoFormats[fmts] == V4L2_PIX_FMT_YUV420) {
+            cap.videoType = VideoType::kI420;
+          } else if (videoFormats[fmts] == V4L2_PIX_FMT_MJPEG) {
+            cap.videoType = VideoType::kMJPEG;
+          } else if (videoFormats[fmts] == V4L2_PIX_FMT_UYVY) {
+            cap.videoType = VideoType::kUYVY;
+          }
 
-            if (ioctl(fd, VIDIOC_TRY_FMT, &video_fmt) >= 0)
-            {
-                if ((video_fmt.fmt.pix.width == size[i][0])
-                    && (video_fmt.fmt.pix.height == size[i][1]))
-                {
-                    VideoCaptureCapability cap;
-                    cap.width = video_fmt.fmt.pix.width;
-                    cap.height = video_fmt.fmt.pix.height;
-                    if (videoFormats[fmts] == V4L2_PIX_FMT_YUYV)
-                    {
-                      cap.videoType = VideoType::kYUY2;
-                    }
-                    else if (videoFormats[fmts] == V4L2_PIX_FMT_YUV420)
-                    {
-                      cap.videoType = VideoType::kI420;
-                    }
-                    else if (videoFormats[fmts] == V4L2_PIX_FMT_MJPEG)
-                    {
-                      cap.videoType = VideoType::kMJPEG;
-                    }
-                    else if (videoFormats[fmts] == V4L2_PIX_FMT_UYVY)
-                    {
-                      cap.videoType = VideoType::kUYVY;
-                    }
+          // get fps of current camera mode
+          // V4l2 does not have a stable method of knowing so we just guess.
+          if (cap.width >= 800 && cap.videoType != VideoType::kMJPEG) {
+            cap.maxFPS = 15;
+          } else {
+            cap.maxFPS = 30;
+          }
 
-                    // get fps of current camera mode
-                    // V4l2 does not have a stable method of knowing so we just guess.
-                    if (cap.width >= 800 &&
-                        cap.videoType != VideoType::kMJPEG) {
-                      cap.maxFPS = 15;
-                    }
-                    else
-                    {
-                        cap.maxFPS = 30;
-                    }
-
-                    _captureCapabilities.push_back(cap);
-                    index++;
-                    LOG(LS_VERBOSE) << "Camera capability, width:" << cap.width
-                                    << " height:" << cap.height << " type:"
-                                    << static_cast<int32_t>(cap.videoType)
-                                    << " fps:" << cap.maxFPS;
-                }
-            }
+          _captureCapabilities.push_back(cap);
+          index++;
+          LOG(LS_VERBOSE) << "Camera capability, width:" << cap.width
+                          << " height:" << cap.height
+                          << " type:" << static_cast<int32_t>(cap.videoType)
+                          << " fps:" << cap.maxFPS;
         }
+      }
     }
+  }
 
-    LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
-    return _captureCapabilities.size();
+  LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
+  return _captureCapabilities.size();
 }
 
 }  // namespace videocapturemodule
diff --git a/modules/video_capture/linux/video_capture_linux.cc b/modules/video_capture/linux/video_capture_linux.cc
index 80810f2..f034f80 100644
--- a/modules/video_capture/linux/video_capture_linux.cc
+++ b/modules/video_capture/linux/video_capture_linux.cc
@@ -24,22 +24,22 @@
 #include <new>
 
 #include "media/base/videocommon.h"
+#include "rtc_base/logging.h"
 #include "rtc_base/refcount.h"
 #include "rtc_base/refcountedobject.h"
 #include "rtc_base/scoped_ref_ptr.h"
-#include "rtc_base/logging.h"
 
 namespace webrtc {
 namespace videocapturemodule {
 rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
     const char* deviceUniqueId) {
-    rtc::scoped_refptr<VideoCaptureModuleV4L2> implementation(
-        new rtc::RefCountedObject<VideoCaptureModuleV4L2>());
+  rtc::scoped_refptr<VideoCaptureModuleV4L2> implementation(
+      new rtc::RefCountedObject<VideoCaptureModuleV4L2>());
 
-    if (implementation->Init(deviceUniqueId) != 0)
-        return nullptr;
+  if (implementation->Init(deviceUniqueId) != 0)
+    return nullptr;
 
-    return implementation;
+  return implementation;
 }
 
 VideoCaptureModuleV4L2::VideoCaptureModuleV4L2()
@@ -54,407 +54,369 @@
       _captureVideoType(VideoType::kI420),
       _pool(NULL) {}
 
-int32_t VideoCaptureModuleV4L2::Init(const char* deviceUniqueIdUTF8)
-{
-    int len = strlen((const char*) deviceUniqueIdUTF8);
-    _deviceUniqueId = new (std::nothrow) char[len + 1];
-    if (_deviceUniqueId)
-    {
-        memcpy(_deviceUniqueId, deviceUniqueIdUTF8, len + 1);
-    }
+int32_t VideoCaptureModuleV4L2::Init(const char* deviceUniqueIdUTF8) {
+  int len = strlen((const char*)deviceUniqueIdUTF8);
+  _deviceUniqueId = new (std::nothrow) char[len + 1];
+  if (_deviceUniqueId) {
+    memcpy(_deviceUniqueId, deviceUniqueIdUTF8, len + 1);
+  }
 
-    int fd;
-    char device[32];
-    bool found = false;
+  int fd;
+  char device[32];
+  bool found = false;
 
-    /* detect /dev/video [0-63] entries */
-    int n;
-    for (n = 0; n < 64; n++)
-    {
-        sprintf(device, "/dev/video%d", n);
-        if ((fd = open(device, O_RDONLY)) != -1)
-        {
-            // query device capabilities
-            struct v4l2_capability cap;
-            if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
-            {
-                if (cap.bus_info[0] != 0)
-                {
-                    if (strncmp((const char*) cap.bus_info,
-                                (const char*) deviceUniqueIdUTF8,
-                                strlen((const char*) deviceUniqueIdUTF8)) == 0) //match with device id
-                    {
-                        close(fd);
-                        found = true;
-                        break; // fd matches with device unique id supplied
-                    }
-                }
-            }
-            close(fd); // close since this is not the matching device
+  /* detect /dev/video [0-63] entries */
+  int n;
+  for (n = 0; n < 64; n++) {
+    sprintf(device, "/dev/video%d", n);
+    if ((fd = open(device, O_RDONLY)) != -1) {
+      // query device capabilities
+      struct v4l2_capability cap;
+      if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
+        if (cap.bus_info[0] != 0) {
+          if (strncmp((const char*)cap.bus_info,
+                      (const char*)deviceUniqueIdUTF8,
+                      strlen((const char*)deviceUniqueIdUTF8)) ==
+              0)  // match with device id
+          {
+            close(fd);
+            found = true;
+            break;  // fd matches with device unique id supplied
+          }
         }
+      }
+      close(fd);  // close since this is not the matching device
     }
-    if (!found)
-    {
-        LOG(LS_INFO) << "no matching device found";
-        return -1;
-    }
-    _deviceId = n; //store the device id
-    return 0;
+  }
+  if (!found) {
+    LOG(LS_INFO) << "no matching device found";
+    return -1;
+  }
+  _deviceId = n;  // store the device id
+  return 0;
 }
 
-VideoCaptureModuleV4L2::~VideoCaptureModuleV4L2()
-{
-    StopCapture();
-    if (_deviceFd != -1)
-      close(_deviceFd);
+VideoCaptureModuleV4L2::~VideoCaptureModuleV4L2() {
+  StopCapture();
+  if (_deviceFd != -1)
+    close(_deviceFd);
 }
 
 int32_t VideoCaptureModuleV4L2::StartCapture(
-    const VideoCaptureCapability& capability)
-{
-    if (_captureStarted)
-    {
-      if (capability.width == _currentWidth &&
-          capability.height == _currentHeight &&
-          _captureVideoType == capability.videoType) {
-        return 0;
-        }
-        else
-        {
-            StopCapture();
-        }
-    }
-
-    rtc::CritScope cs(&_captureCritSect);
-    //first open /dev/video device
-    char device[20];
-    sprintf(device, "/dev/video%d", (int) _deviceId);
-
-    if ((_deviceFd = open(device, O_RDWR | O_NONBLOCK, 0)) < 0)
-    {
-        LOG(LS_INFO) << "error in opening " << device << " errono = " << errno;
-        return -1;
-    }
-
-    // Supported video formats in preferred order.
-    // If the requested resolution is larger than VGA, we prefer MJPEG. Go for
-    // I420 otherwise.
-    const int nFormats = 5;
-    unsigned int fmts[nFormats];
-    if (capability.width > 640 || capability.height > 480) {
-        fmts[0] = V4L2_PIX_FMT_MJPEG;
-        fmts[1] = V4L2_PIX_FMT_YUV420;
-        fmts[2] = V4L2_PIX_FMT_YUYV;
-        fmts[3] = V4L2_PIX_FMT_UYVY;
-        fmts[4] = V4L2_PIX_FMT_JPEG;
+    const VideoCaptureCapability& capability) {
+  if (_captureStarted) {
+    if (capability.width == _currentWidth &&
+        capability.height == _currentHeight &&
+        _captureVideoType == capability.videoType) {
+      return 0;
     } else {
-        fmts[0] = V4L2_PIX_FMT_YUV420;
-        fmts[1] = V4L2_PIX_FMT_YUYV;
-        fmts[2] = V4L2_PIX_FMT_UYVY;
-        fmts[3] = V4L2_PIX_FMT_MJPEG;
-        fmts[4] = V4L2_PIX_FMT_JPEG;
+      StopCapture();
     }
+  }
 
-    // Enumerate image formats.
-    struct v4l2_fmtdesc fmt;
-    int fmtsIdx = nFormats;
-    memset(&fmt, 0, sizeof(fmt));
-    fmt.index = 0;
-    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-    LOG(LS_INFO) << "Video Capture enumerats supported image formats:";
-    while (ioctl(_deviceFd, VIDIOC_ENUM_FMT, &fmt) == 0) {
-        LOG(LS_INFO) << "  { pixelformat = "
-                     << cricket::GetFourccName(fmt.pixelformat)
-                     << ", description = '" << fmt.description << "' }";
-        // Match the preferred order.
-        for (int i = 0; i < nFormats; i++) {
-            if (fmt.pixelformat == fmts[i] && i < fmtsIdx)
-                fmtsIdx = i;
-        }
-        // Keep enumerating.
-        fmt.index++;
+  rtc::CritScope cs(&_captureCritSect);
+  // first open /dev/video device
+  char device[20];
+  sprintf(device, "/dev/video%d", (int)_deviceId);
+
+  if ((_deviceFd = open(device, O_RDWR | O_NONBLOCK, 0)) < 0) {
+    LOG(LS_INFO) << "error in opening " << device << " errono = " << errno;
+    return -1;
+  }
+
+  // Supported video formats in preferred order.
+  // If the requested resolution is larger than VGA, we prefer MJPEG. Go for
+  // I420 otherwise.
+  const int nFormats = 5;
+  unsigned int fmts[nFormats];
+  if (capability.width > 640 || capability.height > 480) {
+    fmts[0] = V4L2_PIX_FMT_MJPEG;
+    fmts[1] = V4L2_PIX_FMT_YUV420;
+    fmts[2] = V4L2_PIX_FMT_YUYV;
+    fmts[3] = V4L2_PIX_FMT_UYVY;
+    fmts[4] = V4L2_PIX_FMT_JPEG;
+  } else {
+    fmts[0] = V4L2_PIX_FMT_YUV420;
+    fmts[1] = V4L2_PIX_FMT_YUYV;
+    fmts[2] = V4L2_PIX_FMT_UYVY;
+    fmts[3] = V4L2_PIX_FMT_MJPEG;
+    fmts[4] = V4L2_PIX_FMT_JPEG;
+  }
+
+  // Enumerate image formats.
+  struct v4l2_fmtdesc fmt;
+  int fmtsIdx = nFormats;
+  memset(&fmt, 0, sizeof(fmt));
+  fmt.index = 0;
+  fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+  LOG(LS_INFO) << "Video Capture enumerats supported image formats:";
+  while (ioctl(_deviceFd, VIDIOC_ENUM_FMT, &fmt) == 0) {
+    LOG(LS_INFO) << "  { pixelformat = "
+                 << cricket::GetFourccName(fmt.pixelformat)
+                 << ", description = '" << fmt.description << "' }";
+    // Match the preferred order.
+    for (int i = 0; i < nFormats; i++) {
+      if (fmt.pixelformat == fmts[i] && i < fmtsIdx)
+        fmtsIdx = i;
     }
+    // Keep enumerating.
+    fmt.index++;
+  }
 
-    if (fmtsIdx == nFormats)
-    {
-        LOG(LS_INFO) << "no supporting video formats found";
-        return -1;
-    } else {
-        LOG(LS_INFO) << "We prefer format "
-                     << cricket::GetFourccName(fmts[fmtsIdx]);
-    }
+  if (fmtsIdx == nFormats) {
+    LOG(LS_INFO) << "no supporting video formats found";
+    return -1;
+  } else {
+    LOG(LS_INFO) << "We prefer format "
+                 << cricket::GetFourccName(fmts[fmtsIdx]);
+  }
 
-    struct v4l2_format video_fmt;
-    memset(&video_fmt, 0, sizeof(struct v4l2_format));
-    video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-    video_fmt.fmt.pix.sizeimage = 0;
-    video_fmt.fmt.pix.width = capability.width;
-    video_fmt.fmt.pix.height = capability.height;
-    video_fmt.fmt.pix.pixelformat = fmts[fmtsIdx];
+  struct v4l2_format video_fmt;
+  memset(&video_fmt, 0, sizeof(struct v4l2_format));
+  video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+  video_fmt.fmt.pix.sizeimage = 0;
+  video_fmt.fmt.pix.width = capability.width;
+  video_fmt.fmt.pix.height = capability.height;
+  video_fmt.fmt.pix.pixelformat = fmts[fmtsIdx];
 
-    if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
-      _captureVideoType = VideoType::kYUY2;
-    else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
-      _captureVideoType = VideoType::kI420;
-    else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
-      _captureVideoType = VideoType::kUYVY;
-    else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG ||
-             video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
-      _captureVideoType = VideoType::kMJPEG;
+  if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
+    _captureVideoType = VideoType::kYUY2;
+  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
+    _captureVideoType = VideoType::kI420;
+  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
+    _captureVideoType = VideoType::kUYVY;
+  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG ||
+           video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
+    _captureVideoType = VideoType::kMJPEG;
 
-    //set format and frame size now
-    if (ioctl(_deviceFd, VIDIOC_S_FMT, &video_fmt) < 0)
-    {
-        LOG(LS_INFO) << "error in VIDIOC_S_FMT, errno = " << errno;
-        return -1;
-    }
+  // set format and frame size now
+  if (ioctl(_deviceFd, VIDIOC_S_FMT, &video_fmt) < 0) {
+    LOG(LS_INFO) << "error in VIDIOC_S_FMT, errno = " << errno;
+    return -1;
+  }
 
-    // initialize current width and height
-    _currentWidth = video_fmt.fmt.pix.width;
-    _currentHeight = video_fmt.fmt.pix.height;
+  // initialize current width and height
+  _currentWidth = video_fmt.fmt.pix.width;
+  _currentHeight = video_fmt.fmt.pix.height;
 
-    // Trying to set frame rate, before check driver capability.
-    bool driver_framerate_support = true;
-    struct v4l2_streamparm streamparms;
-    memset(&streamparms, 0, sizeof(streamparms));
-    streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-    if (ioctl(_deviceFd, VIDIOC_G_PARM, &streamparms) < 0) {
-        LOG(LS_INFO) << "error in VIDIOC_G_PARM errno = " << errno;
+  // Trying to set frame rate, before check driver capability.
+  bool driver_framerate_support = true;
+  struct v4l2_streamparm streamparms;
+  memset(&streamparms, 0, sizeof(streamparms));
+  streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+  if (ioctl(_deviceFd, VIDIOC_G_PARM, &streamparms) < 0) {
+    LOG(LS_INFO) << "error in VIDIOC_G_PARM errno = " << errno;
+    driver_framerate_support = false;
+    // continue
+  } else {
+    // check the capability flag is set to V4L2_CAP_TIMEPERFRAME.
+    if (streamparms.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
+      // driver supports the feature. Set required framerate.
+      memset(&streamparms, 0, sizeof(streamparms));
+      streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+      streamparms.parm.capture.timeperframe.numerator = 1;
+      streamparms.parm.capture.timeperframe.denominator = capability.maxFPS;
+      if (ioctl(_deviceFd, VIDIOC_S_PARM, &streamparms) < 0) {
+        LOG(LS_INFO) << "Failed to set the framerate. errno=" << errno;
         driver_framerate_support = false;
-      // continue
-    } else {
-      // check the capability flag is set to V4L2_CAP_TIMEPERFRAME.
-      if (streamparms.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
-        // driver supports the feature. Set required framerate.
-        memset(&streamparms, 0, sizeof(streamparms));
-        streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-        streamparms.parm.capture.timeperframe.numerator = 1;
-        streamparms.parm.capture.timeperframe.denominator = capability.maxFPS;
-        if (ioctl(_deviceFd, VIDIOC_S_PARM, &streamparms) < 0) {
-          LOG(LS_INFO) << "Failed to set the framerate. errno=" << errno;
-          driver_framerate_support = false;
-        } else {
-          _currentFrameRate = capability.maxFPS;
-        }
-      }
-    }
-    // If driver doesn't support framerate control, need to hardcode.
-    // Hardcoding the value based on the frame size.
-    if (!driver_framerate_support) {
-      if (_currentWidth >= 800 && _captureVideoType != VideoType::kMJPEG) {
-        _currentFrameRate = 15;
       } else {
-        _currentFrameRate = 30;
+        _currentFrameRate = capability.maxFPS;
       }
     }
-
-    if (!AllocateVideoBuffers())
-    {
-        LOG(LS_INFO) << "failed to allocate video capture buffers";
-        return -1;
+  }
+  // If driver doesn't support framerate control, need to hardcode.
+  // Hardcoding the value based on the frame size.
+  if (!driver_framerate_support) {
+    if (_currentWidth >= 800 && _captureVideoType != VideoType::kMJPEG) {
+      _currentFrameRate = 15;
+    } else {
+      _currentFrameRate = 30;
     }
+  }
 
-    //start capture thread;
-    if (!_captureThread)
-    {
-        _captureThread.reset(new rtc::PlatformThread(
-            VideoCaptureModuleV4L2::CaptureThread, this, "CaptureThread"));
-        _captureThread->Start();
-        _captureThread->SetPriority(rtc::kHighPriority);
-    }
+  if (!AllocateVideoBuffers()) {
+    LOG(LS_INFO) << "failed to allocate video capture buffers";
+    return -1;
+  }
 
-    // Needed to start UVC camera - from the uvcview application
-    enum v4l2_buf_type type;
-    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-    if (ioctl(_deviceFd, VIDIOC_STREAMON, &type) == -1)
-    {
-        LOG(LS_INFO) << "Failed to turn on stream";
-        return -1;
-    }
+  // Start the capture thread.
+  if (!_captureThread) {
+    _captureThread.reset(new rtc::PlatformThread(
+        VideoCaptureModuleV4L2::CaptureThread, this, "CaptureThread"));
+    _captureThread->Start();
+    _captureThread->SetPriority(rtc::kHighPriority);
+  }
 
-    _captureStarted = true;
-    return 0;
+  // Needed to start UVC camera - from the uvcview application
+  enum v4l2_buf_type type;
+  type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+  if (ioctl(_deviceFd, VIDIOC_STREAMON, &type) == -1) {
+    LOG(LS_INFO) << "Failed to turn on stream";
+    return -1;
+  }
+
+  _captureStarted = true;
+  return 0;
 }
 
-int32_t VideoCaptureModuleV4L2::StopCapture()
-{
-    if (_captureThread) {
-        // Make sure the capture thread stop stop using the critsect.
-        _captureThread->Stop();
-        _captureThread.reset();
-    }
+int32_t VideoCaptureModuleV4L2::StopCapture() {
+  if (_captureThread) {
+    // Make sure the capture thread stops using the critsect.
+    _captureThread->Stop();
+    _captureThread.reset();
+  }
 
-    rtc::CritScope cs(&_captureCritSect);
-    if (_captureStarted)
-    {
-        _captureStarted = false;
+  rtc::CritScope cs(&_captureCritSect);
+  if (_captureStarted) {
+    _captureStarted = false;
 
-        DeAllocateVideoBuffers();
-        close(_deviceFd);
-        _deviceFd = -1;
-    }
+    DeAllocateVideoBuffers();
+    close(_deviceFd);
+    _deviceFd = -1;
+  }
 
-    return 0;
+  return 0;
 }
 
-//critical section protected by the caller
+// critical section protected by the caller
 
-bool VideoCaptureModuleV4L2::AllocateVideoBuffers()
-{
-    struct v4l2_requestbuffers rbuffer;
-    memset(&rbuffer, 0, sizeof(v4l2_requestbuffers));
+bool VideoCaptureModuleV4L2::AllocateVideoBuffers() {
+  struct v4l2_requestbuffers rbuffer;
+  memset(&rbuffer, 0, sizeof(v4l2_requestbuffers));
 
-    rbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-    rbuffer.memory = V4L2_MEMORY_MMAP;
+  rbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+  rbuffer.memory = V4L2_MEMORY_MMAP;
+  rbuffer.count = kNoOfV4L2Bufffers;
+
+  if (ioctl(_deviceFd, VIDIOC_REQBUFS, &rbuffer) < 0) {
+    LOG(LS_INFO) << "Could not get buffers from device. errno = " << errno;
+    return false;
+  }
+
+  if (rbuffer.count > kNoOfV4L2Bufffers)
     rbuffer.count = kNoOfV4L2Bufffers;
 
-    if (ioctl(_deviceFd, VIDIOC_REQBUFS, &rbuffer) < 0)
-    {
-        LOG(LS_INFO) << "Could not get buffers from device. errno = " << errno;
-        return false;
+  _buffersAllocatedByDevice = rbuffer.count;
+
+  // Map the buffers
+  _pool = new Buffer[rbuffer.count];
+
+  for (unsigned int i = 0; i < rbuffer.count; i++) {
+    struct v4l2_buffer buffer;
+    memset(&buffer, 0, sizeof(v4l2_buffer));
+    buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    buffer.memory = V4L2_MEMORY_MMAP;
+    buffer.index = i;
+
+    if (ioctl(_deviceFd, VIDIOC_QUERYBUF, &buffer) < 0) {
+      return false;
     }
 
-    if (rbuffer.count > kNoOfV4L2Bufffers)
-        rbuffer.count = kNoOfV4L2Bufffers;
+    _pool[i].start = mmap(NULL, buffer.length, PROT_READ | PROT_WRITE,
+                          MAP_SHARED, _deviceFd, buffer.m.offset);
 
-    _buffersAllocatedByDevice = rbuffer.count;
-
-    //Map the buffers
-    _pool = new Buffer[rbuffer.count];
-
-    for (unsigned int i = 0; i < rbuffer.count; i++)
-    {
-        struct v4l2_buffer buffer;
-        memset(&buffer, 0, sizeof(v4l2_buffer));
-        buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-        buffer.memory = V4L2_MEMORY_MMAP;
-        buffer.index = i;
-
-        if (ioctl(_deviceFd, VIDIOC_QUERYBUF, &buffer) < 0)
-        {
-            return false;
-        }
-
-        _pool[i].start = mmap(NULL, buffer.length, PROT_READ | PROT_WRITE, MAP_SHARED,
-                              _deviceFd, buffer.m.offset);
-
-        if (MAP_FAILED == _pool[i].start)
-        {
-            for (unsigned int j = 0; j < i; j++)
-                munmap(_pool[j].start, _pool[j].length);
-            return false;
-        }
-
-        _pool[i].length = buffer.length;
-
-        if (ioctl(_deviceFd, VIDIOC_QBUF, &buffer) < 0)
-        {
-            return false;
-        }
+    if (MAP_FAILED == _pool[i].start) {
+      for (unsigned int j = 0; j < i; j++)
+        munmap(_pool[j].start, _pool[j].length);
+      return false;
     }
+
+    _pool[i].length = buffer.length;
+
+    if (ioctl(_deviceFd, VIDIOC_QBUF, &buffer) < 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool VideoCaptureModuleV4L2::DeAllocateVideoBuffers() {
+  // unmap buffers
+  for (int i = 0; i < _buffersAllocatedByDevice; i++)
+    munmap(_pool[i].start, _pool[i].length);
+
+  delete[] _pool;
+
+  // turn off stream
+  enum v4l2_buf_type type;
+  type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+  if (ioctl(_deviceFd, VIDIOC_STREAMOFF, &type) < 0) {
+    LOG(LS_INFO) << "VIDIOC_STREAMOFF error. errno: " << errno;
+  }
+
+  return true;
+}
+
+bool VideoCaptureModuleV4L2::CaptureStarted() {
+  return _captureStarted;
+}
+
+bool VideoCaptureModuleV4L2::CaptureThread(void* obj) {
+  return static_cast<VideoCaptureModuleV4L2*>(obj)->CaptureProcess();
+}
+bool VideoCaptureModuleV4L2::CaptureProcess() {
+  int retVal = 0;
+  fd_set rSet;
+  struct timeval timeout;
+
+  rtc::CritScope cs(&_captureCritSect);
+
+  FD_ZERO(&rSet);
+  FD_SET(_deviceFd, &rSet);
+  timeout.tv_sec = 1;
+  timeout.tv_usec = 0;
+
+  retVal = select(_deviceFd + 1, &rSet, NULL, NULL, &timeout);
+  if (retVal < 0 && errno != EINTR)  // continue if interrupted
+  {
+    // select failed
+    return false;
+  } else if (retVal == 0) {
+    // select timed out
     return true;
-}
-
-bool VideoCaptureModuleV4L2::DeAllocateVideoBuffers()
-{
-    // unmap buffers
-    for (int i = 0; i < _buffersAllocatedByDevice; i++)
-        munmap(_pool[i].start, _pool[i].length);
-
-    delete[] _pool;
-
-    // turn off stream
-    enum v4l2_buf_type type;
-    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-    if (ioctl(_deviceFd, VIDIOC_STREAMOFF, &type) < 0)
-    {
-        LOG(LS_INFO) << "VIDIOC_STREAMOFF error. errno: " << errno;
-    }
-
+  } else if (!FD_ISSET(_deviceFd, &rSet)) {
+    // no event on camera handle
     return true;
-}
+  }
 
-bool VideoCaptureModuleV4L2::CaptureStarted()
-{
-    return _captureStarted;
-}
-
-bool VideoCaptureModuleV4L2::CaptureThread(void* obj)
-{
-    return static_cast<VideoCaptureModuleV4L2*> (obj)->CaptureProcess();
-}
-bool VideoCaptureModuleV4L2::CaptureProcess()
-{
-    int retVal = 0;
-    fd_set rSet;
-    struct timeval timeout;
-
-    rtc::CritScope cs(&_captureCritSect);
-
-    FD_ZERO(&rSet);
-    FD_SET(_deviceFd, &rSet);
-    timeout.tv_sec = 1;
-    timeout.tv_usec = 0;
-
-    retVal = select(_deviceFd + 1, &rSet, NULL, NULL, &timeout);
-    if (retVal < 0 && errno != EINTR) // continue if interrupted
-    {
-        // select failed
-        return false;
-    }
-    else if (retVal == 0)
-    {
-        // select timed out
+  if (_captureStarted) {
+    struct v4l2_buffer buf;
+    memset(&buf, 0, sizeof(struct v4l2_buffer));
+    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    buf.memory = V4L2_MEMORY_MMAP;
+    // dequeue a buffer - repeat until dequeued properly!
+    while (ioctl(_deviceFd, VIDIOC_DQBUF, &buf) < 0) {
+      if (errno != EINTR) {
+        LOG(LS_INFO) << "could not sync on a buffer on device "
+                     << strerror(errno);
         return true;
+      }
     }
-    else if (!FD_ISSET(_deviceFd, &rSet))
-    {
-        // not event on camera handle
-        return true;
-    }
+    VideoCaptureCapability frameInfo;
+    frameInfo.width = _currentWidth;
+    frameInfo.height = _currentHeight;
+    frameInfo.videoType = _captureVideoType;
 
-    if (_captureStarted)
-    {
-        struct v4l2_buffer buf;
-        memset(&buf, 0, sizeof(struct v4l2_buffer));
-        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-        buf.memory = V4L2_MEMORY_MMAP;
-        // dequeue a buffer - repeat until dequeued properly!
-        while (ioctl(_deviceFd, VIDIOC_DQBUF, &buf) < 0)
-        {
-            if (errno != EINTR)
-            {
-                LOG(LS_INFO) << "could not sync on a buffer on device "
-                             << strerror(errno);
-                return true;
-            }
-        }
-        VideoCaptureCapability frameInfo;
-        frameInfo.width = _currentWidth;
-        frameInfo.height = _currentHeight;
-        frameInfo.videoType = _captureVideoType;
-
-        // convert to to I420 if needed
-        IncomingFrame((unsigned char*) _pool[buf.index].start,
-                      buf.bytesused, frameInfo);
-        // enqueue the buffer again
-        if (ioctl(_deviceFd, VIDIOC_QBUF, &buf) == -1)
-        {
-            LOG(LS_INFO) << "Failed to enqueue capture buffer";
-        }
+    // convert to I420 if needed
+    IncomingFrame((unsigned char*)_pool[buf.index].start, buf.bytesused,
+                  frameInfo);
+    // enqueue the buffer again
+    if (ioctl(_deviceFd, VIDIOC_QBUF, &buf) == -1) {
+      LOG(LS_INFO) << "Failed to enqueue capture buffer";
     }
-    usleep(0);
-    return true;
+  }
+  usleep(0);
+  return true;
 }
 
-int32_t VideoCaptureModuleV4L2::CaptureSettings(VideoCaptureCapability& settings)
-{
-    settings.width = _currentWidth;
-    settings.height = _currentHeight;
-    settings.maxFPS = _currentFrameRate;
-    settings.videoType = _captureVideoType;
+int32_t VideoCaptureModuleV4L2::CaptureSettings(
+    VideoCaptureCapability& settings) {
+  settings.width = _currentWidth;
+  settings.height = _currentHeight;
+  settings.maxFPS = _currentFrameRate;
+  settings.videoType = _captureVideoType;
 
-    return 0;
+  return 0;
 }
 }  // namespace videocapturemodule
 }  // namespace webrtc
diff --git a/modules/video_capture/objc/device_info.mm b/modules/video_capture/objc/device_info.mm
index f5a76d3..aecc01b 100644
--- a/modules/video_capture/objc/device_info.mm
+++ b/modules/video_capture/objc/device_info.mm
@@ -25,11 +25,12 @@
 using namespace videocapturemodule;
 
 static NSArray* camera_presets = @[
-  AVCaptureSessionPreset352x288, AVCaptureSessionPreset640x480,
+  AVCaptureSessionPreset352x288,
+  AVCaptureSessionPreset640x480,
   AVCaptureSessionPreset1280x720
 ];
 
-#define IOS_UNSUPPORTED()                                                 \
+#define IOS_UNSUPPORTED()                                                    \
   LOG(LS_ERROR) << __FUNCTION__ << " is not supported on the iOS platform."; \
   return -1;
 
@@ -55,8 +56,7 @@
     for (NSString* preset in camera_presets) {
       BOOL support = [avDevice supportsAVCaptureSessionPreset:preset];
       if (support) {
-        VideoCaptureCapability capability =
-            [DeviceInfoIosObjC capabilityForPreset:preset];
+        VideoCaptureCapability capability = [DeviceInfoIosObjC capabilityForPreset:preset];
         capabilityVector.push_back(capability);
       }
     }
@@ -66,8 +66,7 @@
     this->GetDeviceName(i, deviceNameUTF8, 256, deviceId, 256);
     std::string deviceIdCopy(deviceId);
     std::pair<std::string, VideoCaptureCapabilities> mapPair =
-        std::pair<std::string, VideoCaptureCapabilities>(deviceIdCopy,
-                                                         capabilityVector);
+        std::pair<std::string, VideoCaptureCapabilities>(deviceIdCopy, capabilityVector);
     _capabilitiesMap.insert(mapPair);
   }
 
@@ -87,8 +86,7 @@
                                      uint32_t productUniqueIdUTF8Length) {
   NSString* deviceName = [DeviceInfoIosObjC deviceNameForIndex:deviceNumber];
 
-  NSString* deviceUniqueId =
-      [DeviceInfoIosObjC deviceUniqueIdForIndex:deviceNumber];
+  NSString* deviceUniqueId = [DeviceInfoIosObjC deviceUniqueIdForIndex:deviceNumber];
 
   strncpy(deviceNameUTF8, [deviceName UTF8String], deviceNameUTF8Length);
   deviceNameUTF8[deviceNameUTF8Length - 1] = '\0';
@@ -136,17 +134,15 @@
   return -1;
 }
 
-int32_t DeviceInfoIos::DisplayCaptureSettingsDialogBox(
-    const char* deviceUniqueIdUTF8,
-    const char* dialogTitleUTF8,
-    void* parentWindow,
-    uint32_t positionX,
-    uint32_t positionY) {
+int32_t DeviceInfoIos::DisplayCaptureSettingsDialogBox(const char* deviceUniqueIdUTF8,
+                                                       const char* dialogTitleUTF8,
+                                                       void* parentWindow,
+                                                       uint32_t positionX,
+                                                       uint32_t positionY) {
   IOS_UNSUPPORTED();
 }
 
-int32_t DeviceInfoIos::GetOrientation(const char* deviceUniqueIdUTF8,
-                                      VideoRotation& orientation) {
+int32_t DeviceInfoIos::GetOrientation(const char* deviceUniqueIdUTF8, VideoRotation& orientation) {
   if (strcmp(deviceUniqueIdUTF8, "Front Camera") == 0) {
     orientation = kVideoRotation_0;
   } else {
diff --git a/modules/video_capture/objc/rtc_video_capture_objc.mm b/modules/video_capture/objc/rtc_video_capture_objc.mm
index 0489d3d..8b65dc0 100644
--- a/modules/video_capture/objc/rtc_video_capture_objc.mm
+++ b/modules/video_capture/objc/rtc_video_capture_objc.mm
@@ -56,22 +56,18 @@
     }
 
     // create and configure a new output (using callbacks)
-    AVCaptureVideoDataOutput* captureOutput =
-        [[AVCaptureVideoDataOutput alloc] init];
+    AVCaptureVideoDataOutput* captureOutput = [[AVCaptureVideoDataOutput alloc] init];
     NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
 
-    NSNumber* val = [NSNumber
-        numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange];
-    NSDictionary* videoSettings =
-        [NSDictionary dictionaryWithObject:val forKey:key];
+    NSNumber* val = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange];
+    NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:val forKey:key];
     captureOutput.videoSettings = videoSettings;
 
     // add new output
     if ([_captureSession canAddOutput:captureOutput]) {
       [_captureSession addOutput:captureOutput];
     } else {
-      LOG(LS_ERROR) << __FUNCTION__
-                    << ": Could not add output to AVCaptureSession";
+      LOG(LS_ERROR) << __FUNCTION__ << ": Could not add output to AVCaptureSession";
     }
 
 #ifdef WEBRTC_IOS
@@ -95,8 +91,7 @@
 - (void)directOutputToSelf {
   [[self currentOutput]
       setSampleBufferDelegate:self
-                        queue:dispatch_get_global_queue(
-                                  DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)];
+                        queue:dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)];
 }
 
 - (void)directOutputToNil {
@@ -143,13 +138,11 @@
     if (capability.width > 1280 || capability.height > 720) {
       return NO;
     }
-  } else if ([_captureSession
-                 canSetSessionPreset:AVCaptureSessionPreset640x480]) {
+  } else if ([_captureSession canSetSessionPreset:AVCaptureSessionPreset640x480]) {
     if (capability.width > 640 || capability.height > 480) {
       return NO;
     }
-  } else if ([_captureSession
-                 canSetSessionPreset:AVCaptureSessionPreset352x288]) {
+  } else if ([_captureSession canSetSessionPreset:AVCaptureSessionPreset352x288]) {
     if (capability.width > 352 || capability.height > 288) {
       return NO;
     }
@@ -160,17 +153,15 @@
   _capability = capability;
 
   AVCaptureVideoDataOutput* currentOutput = [self currentOutput];
-  if (!currentOutput)
-    return NO;
+  if (!currentOutput) return NO;
 
   [self directOutputToSelf];
 
   _orientationHasChanged = NO;
   _captureChanging = YES;
-  dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
-                 ^{
-                   [self startCaptureInBackgroundWithOutput:currentOutput];
-                 });
+  dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+    [self startCaptureInBackgroundWithOutput:currentOutput];
+  });
   return YES;
 }
 
@@ -178,10 +169,8 @@
   return [[_captureSession outputs] firstObject];
 }
 
-- (void)startCaptureInBackgroundWithOutput:
-    (AVCaptureVideoDataOutput*)currentOutput {
-  NSString* captureQuality =
-      [NSString stringWithString:AVCaptureSessionPresetLow];
+- (void)startCaptureInBackgroundWithOutput:(AVCaptureVideoDataOutput*)currentOutput {
+  NSString* captureQuality = [NSString stringWithString:AVCaptureSessionPresetLow];
   if (_capability.width >= 1280 || _capability.height >= 720) {
     captureQuality = [NSString stringWithString:AVCaptureSessionPreset1280x720];
   } else if (_capability.width >= 640 || _capability.height >= 480) {
@@ -219,8 +208,7 @@
       _connection.videoOrientation = AVCaptureVideoOrientationPortrait;
       break;
     case UIDeviceOrientationPortraitUpsideDown:
-      _connection.videoOrientation =
-          AVCaptureVideoOrientationPortraitUpsideDown;
+      _connection.videoOrientation = AVCaptureVideoOrientationPortraitUpsideDown;
       break;
     case UIDeviceOrientationLandscapeLeft:
       _connection.videoOrientation = AVCaptureVideoOrientationLandscapeRight;
@@ -258,10 +246,9 @@
   }
 
   _captureChanging = YES;
-  dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
-                 ^(void) {
-                   [self stopCaptureInBackground];
-                 });
+  dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void) {
+    [self stopCaptureInBackground];
+  });
   return YES;
 }
 
@@ -275,8 +262,7 @@
   NSArray* currentInputs = [_captureSession inputs];
   // remove current input
   if ([currentInputs count] > 0) {
-    AVCaptureInput* currentInput =
-        (AVCaptureInput*)[currentInputs objectAtIndex:0];
+    AVCaptureInput* currentInput = (AVCaptureInput*)[currentInputs objectAtIndex:0];
 
     [_captureSession removeInput:currentInput];
   }
@@ -288,8 +274,7 @@
     return NO;
   }
 
-  AVCaptureDevice* captureDevice =
-      [DeviceInfoIosObjC captureDeviceForUniqueId:uniqueId];
+  AVCaptureDevice* captureDevice = [DeviceInfoIosObjC captureDeviceForUniqueId:uniqueId];
 
   if (!captureDevice) {
     return NO;
@@ -298,14 +283,12 @@
   // now create capture session input out of AVCaptureDevice
   NSError* deviceError = nil;
   AVCaptureDeviceInput* newCaptureInput =
-      [AVCaptureDeviceInput deviceInputWithDevice:captureDevice
-                                            error:&deviceError];
+      [AVCaptureDeviceInput deviceInputWithDevice:captureDevice error:&deviceError];
 
   if (!newCaptureInput) {
     const char* errorMessage = [[deviceError localizedDescription] UTF8String];
 
-    LOG(LS_ERROR) << __FUNCTION__ << ": deviceInputWithDevice error:"
-                  << errorMessage;
+    LOG(LS_ERROR) << __FUNCTION__ << ": deviceInputWithDevice error:" << errorMessage;
 
     return NO;
   }
@@ -339,17 +322,12 @@
   const int kYPlaneIndex = 0;
   const int kUVPlaneIndex = 1;
 
-  uint8_t* baseAddress =
-      (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(videoFrame, kYPlaneIndex);
-  size_t yPlaneBytesPerRow =
-      CVPixelBufferGetBytesPerRowOfPlane(videoFrame, kYPlaneIndex);
+  uint8_t* baseAddress = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(videoFrame, kYPlaneIndex);
+  size_t yPlaneBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(videoFrame, kYPlaneIndex);
   size_t yPlaneHeight = CVPixelBufferGetHeightOfPlane(videoFrame, kYPlaneIndex);
-  size_t uvPlaneBytesPerRow =
-      CVPixelBufferGetBytesPerRowOfPlane(videoFrame, kUVPlaneIndex);
-  size_t uvPlaneHeight =
-      CVPixelBufferGetHeightOfPlane(videoFrame, kUVPlaneIndex);
-  size_t frameSize =
-      yPlaneBytesPerRow * yPlaneHeight + uvPlaneBytesPerRow * uvPlaneHeight;
+  size_t uvPlaneBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(videoFrame, kUVPlaneIndex);
+  size_t uvPlaneHeight = CVPixelBufferGetHeightOfPlane(videoFrame, kUVPlaneIndex);
+  size_t frameSize = yPlaneBytesPerRow * yPlaneHeight + uvPlaneBytesPerRow * uvPlaneHeight;
 
   VideoCaptureCapability tempCaptureCapability;
   tempCaptureCapability.width = CVPixelBufferGetWidth(videoFrame);
diff --git a/modules/video_capture/video_capture_impl.cc b/modules/video_capture/video_capture_impl.cc
index 1f40ee7..88890a6 100644
--- a/modules/video_capture/video_capture_impl.cc
+++ b/modules/video_capture/video_capture_impl.cc
@@ -54,7 +54,8 @@
       *rotation = kVideoRotation_270;
       return 0;
     default:
-      return -1;;
+      return -1;
+      ;
   }
 }
 
@@ -87,29 +88,28 @@
       _lastProcessFrameTimeNanos(rtc::TimeNanos()),
       _rotateFrame(kVideoRotation_0),
       apply_rotation_(false) {
-    _requestedCapability.width = kDefaultWidth;
-    _requestedCapability.height = kDefaultHeight;
-    _requestedCapability.maxFPS = 30;
-    _requestedCapability.videoType = VideoType::kI420;
-    memset(_incomingFrameTimesNanos, 0, sizeof(_incomingFrameTimesNanos));
+  _requestedCapability.width = kDefaultWidth;
+  _requestedCapability.height = kDefaultHeight;
+  _requestedCapability.maxFPS = 30;
+  _requestedCapability.videoType = VideoType::kI420;
+  memset(_incomingFrameTimesNanos, 0, sizeof(_incomingFrameTimesNanos));
 }
 
-VideoCaptureImpl::~VideoCaptureImpl()
-{
-    DeRegisterCaptureDataCallback();
-    if (_deviceUniqueId)
-        delete[] _deviceUniqueId;
+VideoCaptureImpl::~VideoCaptureImpl() {
+  DeRegisterCaptureDataCallback();
+  if (_deviceUniqueId)
+    delete[] _deviceUniqueId;
 }
 
 void VideoCaptureImpl::RegisterCaptureDataCallback(
     rtc::VideoSinkInterface<VideoFrame>* dataCallBack) {
-    rtc::CritScope cs(&_apiCs);
-    _dataCallBack = dataCallBack;
+  rtc::CritScope cs(&_apiCs);
+  _dataCallBack = dataCallBack;
 }
 
 void VideoCaptureImpl::DeRegisterCaptureDataCallback() {
-    rtc::CritScope cs(&_apiCs);
-    _dataCallBack = NULL;
+  rtc::CritScope cs(&_apiCs);
+  _dataCallBack = NULL;
 }
 int32_t VideoCaptureImpl::DeliverCapturedFrame(VideoFrame& captureFrame) {
   UpdateFrameCount();  // frame count used for local frame rate callback.
@@ -121,68 +121,66 @@
   return 0;
 }
 
-int32_t VideoCaptureImpl::IncomingFrame(
-    uint8_t* videoFrame,
-    size_t videoFrameLength,
-    const VideoCaptureCapability& frameInfo,
-    int64_t captureTime/*=0*/)
-{
-    rtc::CritScope cs(&_apiCs);
+int32_t VideoCaptureImpl::IncomingFrame(uint8_t* videoFrame,
+                                        size_t videoFrameLength,
+                                        const VideoCaptureCapability& frameInfo,
+                                        int64_t captureTime /*=0*/) {
+  rtc::CritScope cs(&_apiCs);
 
-    const int32_t width = frameInfo.width;
-    const int32_t height = frameInfo.height;
+  const int32_t width = frameInfo.width;
+  const int32_t height = frameInfo.height;
 
-    TRACE_EVENT1("webrtc", "VC::IncomingFrame", "capture_time", captureTime);
+  TRACE_EVENT1("webrtc", "VC::IncomingFrame", "capture_time", captureTime);
 
-    // Not encoded, convert to I420.
-    if (frameInfo.videoType != VideoType::kMJPEG &&
-        CalcBufferSize(frameInfo.videoType, width, abs(height)) !=
-            videoFrameLength) {
-      LOG(LS_ERROR) << "Wrong incoming frame length.";
-      return -1;
+  // Not encoded, convert to I420.
+  if (frameInfo.videoType != VideoType::kMJPEG &&
+      CalcBufferSize(frameInfo.videoType, width, abs(height)) !=
+          videoFrameLength) {
+    LOG(LS_ERROR) << "Wrong incoming frame length.";
+    return -1;
+  }
+
+  int stride_y = width;
+  int stride_uv = (width + 1) / 2;
+  int target_width = width;
+  int target_height = height;
+
+  // SetApplyRotation doesn't take any lock. Make a local copy here.
+  bool apply_rotation = apply_rotation_;
+
+  if (apply_rotation) {
+    // Rotating resolution when for 90/270 degree rotations.
+    if (_rotateFrame == kVideoRotation_90 ||
+        _rotateFrame == kVideoRotation_270) {
+      target_width = abs(height);
+      target_height = width;
     }
+  }
 
-    int stride_y = width;
-    int stride_uv = (width + 1) / 2;
-    int target_width = width;
-    int target_height = height;
+  // Setting absolute height (in case it was negative).
+  // In Windows, the image starts bottom left, instead of top left.
+  // Setting a negative source height, inverts the image (within LibYuv).
 
-    // SetApplyRotation doesn't take any lock. Make a local copy here.
-    bool apply_rotation = apply_rotation_;
+  // TODO(nisse): Use a pool?
+  rtc::scoped_refptr<I420Buffer> buffer = I420Buffer::Create(
+      target_width, abs(target_height), stride_y, stride_uv, stride_uv);
+  const int conversionResult = ConvertToI420(
+      frameInfo.videoType, videoFrame, 0, 0,  // No cropping
+      width, height, videoFrameLength,
+      apply_rotation ? _rotateFrame : kVideoRotation_0, buffer.get());
+  if (conversionResult < 0) {
+    LOG(LS_ERROR) << "Failed to convert capture frame from type "
+                  << static_cast<int>(frameInfo.videoType) << "to I420.";
+    return -1;
+  }
 
-    if (apply_rotation) {
-      // Rotating resolution when for 90/270 degree rotations.
-      if (_rotateFrame == kVideoRotation_90 ||
-          _rotateFrame == kVideoRotation_270) {
-        target_width = abs(height);
-        target_height = width;
-      }
-    }
+  VideoFrame captureFrame(buffer, 0, rtc::TimeMillis(),
+                          !apply_rotation ? _rotateFrame : kVideoRotation_0);
+  captureFrame.set_ntp_time_ms(captureTime);
 
-    // Setting absolute height (in case it was negative).
-    // In Windows, the image starts bottom left, instead of top left.
-    // Setting a negative source height, inverts the image (within LibYuv).
+  DeliverCapturedFrame(captureFrame);
 
-    // TODO(nisse): Use a pool?
-    rtc::scoped_refptr<I420Buffer> buffer = I420Buffer::Create(
-        target_width, abs(target_height), stride_y, stride_uv, stride_uv);
-    const int conversionResult = ConvertToI420(
-        frameInfo.videoType, videoFrame, 0, 0,  // No cropping
-        width, height, videoFrameLength,
-        apply_rotation ? _rotateFrame : kVideoRotation_0, buffer.get());
-    if (conversionResult < 0) {
-      LOG(LS_ERROR) << "Failed to convert capture frame from type "
-                    << static_cast<int>(frameInfo.videoType) << "to I420.";
-      return -1;
-    }
-
-    VideoFrame captureFrame(buffer, 0, rtc::TimeMillis(),
-                            !apply_rotation ? _rotateFrame : kVideoRotation_0);
-    captureFrame.set_ntp_time_ms(captureTime);
-
-    DeliverCapturedFrame(captureFrame);
-
-    return 0;
+  return 0;
 }
 
 int32_t VideoCaptureImpl::SetCaptureRotation(VideoRotation rotation) {
diff --git a/modules/video_capture/windows/device_info_ds.cc b/modules/video_capture/windows/device_info_ds.cc
index 1911fd9..d0946c4 100644
--- a/modules/video_capture/windows/device_info_ds.cc
+++ b/modules/video_capture/windows/device_info_ds.cc
@@ -19,326 +19,266 @@
 #include <Dvdmedia.h>
 #include <Streams.h>
 
-namespace webrtc
-{
-namespace videocapturemodule
-{
+namespace webrtc {
+namespace videocapturemodule {
 
 // static
-DeviceInfoDS* DeviceInfoDS::Create()
-{
-    DeviceInfoDS* dsInfo = new DeviceInfoDS();
-    if (!dsInfo || dsInfo->Init() != 0)
-    {
-        delete dsInfo;
-        dsInfo = NULL;
-    }
-    return dsInfo;
+DeviceInfoDS* DeviceInfoDS::Create() {
+  DeviceInfoDS* dsInfo = new DeviceInfoDS();
+  if (!dsInfo || dsInfo->Init() != 0) {
+    delete dsInfo;
+    dsInfo = NULL;
+  }
+  return dsInfo;
 }
 
 DeviceInfoDS::DeviceInfoDS()
-    : _dsDevEnum(NULL), _dsMonikerDevEnum(NULL),
-      _CoUninitializeIsRequired(true)
-{
-    // 1) Initialize the COM library (make Windows load the DLLs).
-    //
-    // CoInitializeEx must be called at least once, and is usually called only once,
-    // for each thread that uses the COM library. Multiple calls to CoInitializeEx
-    // by the same thread are allowed as long as they pass the same concurrency flag,
-    // but subsequent valid calls return S_FALSE.
-    // To close the COM library gracefully on a thread, each successful call to
-    // CoInitializeEx, including any call that returns S_FALSE, must be balanced
-    // by a corresponding call to CoUninitialize.
-    //
+    : _dsDevEnum(NULL),
+      _dsMonikerDevEnum(NULL),
+      _CoUninitializeIsRequired(true) {
+  // 1) Initialize the COM library (make Windows load the DLLs).
+  //
+  // CoInitializeEx must be called at least once, and is usually called only
+  // once, for each thread that uses the COM library. Multiple calls to
+  // CoInitializeEx by the same thread are allowed as long as they pass the same
+  // concurrency flag, but subsequent valid calls return S_FALSE. To close the
+  // COM library gracefully on a thread, each successful call to CoInitializeEx,
+  // including any call that returns S_FALSE, must be balanced by a
+  // corresponding call to CoUninitialize.
+  //
 
-    /*Apartment-threading, while allowing for multiple threads of execution,
-     serializes all incoming calls by requiring that calls to methods of objects created by this thread always run on the same thread
-     the apartment/thread that created them. In addition, calls can arrive only at message-queue boundaries (i.e., only during a
-     PeekMessage, SendMessage, DispatchMessage, etc.). Because of this serialization, it is not typically necessary to write concurrency control into
-     the code for the object, other than to avoid calls to PeekMessage and SendMessage during processing that must not be interrupted by other method
-     invocations or calls to other objects in the same apartment/thread.*/
+  /*Apartment-threading, while allowing for multiple threads of execution,
+   serializes all incoming calls by requiring that calls to methods of objects
+   created by this thread always run on the same thread the apartment/thread
+   that created them. In addition, calls can arrive only at message-queue
+   boundaries (i.e., only during a PeekMessage, SendMessage, DispatchMessage,
+   etc.). Because of this serialization, it is not typically necessary to write
+   concurrency control into the code for the object, other than to avoid calls
+   to PeekMessage and SendMessage during processing that must not be interrupted
+   by other method invocations or calls to other objects in the same
+   apartment/thread.*/
 
-    ///CoInitializeEx(NULL, COINIT_APARTMENTTHREADED ); //| COINIT_SPEED_OVER_MEMORY
-    HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED); // Use COINIT_MULTITHREADED since Voice Engine uses COINIT_MULTITHREADED
-    if (FAILED(hr))
-    {
-        // Avoid calling CoUninitialize() since CoInitializeEx() failed.
-        _CoUninitializeIsRequired = FALSE;
+  /// CoInitializeEx(NULL, COINIT_APARTMENTTHREADED ); //|
+  /// COINIT_SPEED_OVER_MEMORY
+  HRESULT hr = CoInitializeEx(
+      NULL, COINIT_MULTITHREADED);  // Use COINIT_MULTITHREADED since Voice
+                                    // Engine uses COINIT_MULTITHREADED
+  if (FAILED(hr)) {
+    // Avoid calling CoUninitialize() since CoInitializeEx() failed.
+    _CoUninitializeIsRequired = FALSE;
 
-        if (hr == RPC_E_CHANGED_MODE)
-        {
-            // Calling thread has already initialized COM to be used in a single-threaded
-            // apartment (STA). We are then prevented from using STA.
-            // Details: hr = 0x80010106 <=> "Cannot change thread mode after it is set".
-            //
-            LOG(LS_INFO) << __FUNCTION__
-                         << ": CoInitializeEx(NULL, COINIT_APARTMENTTHREADED)"
-                         << " => RPC_E_CHANGED_MODE, error 0x" << std::hex
-                         << hr;
-        }
+    if (hr == RPC_E_CHANGED_MODE) {
+      // Calling thread has already initialized COM to be used in a
+      // single-threaded apartment (STA). We are then prevented from using STA.
+      // Details: hr = 0x80010106 <=> "Cannot change thread mode after it is
+      // set".
+      //
+      LOG(LS_INFO) << __FUNCTION__
+                   << ": CoInitializeEx(NULL, COINIT_APARTMENTTHREADED)"
+                   << " => RPC_E_CHANGED_MODE, error 0x" << std::hex << hr;
     }
+  }
 }
 
-DeviceInfoDS::~DeviceInfoDS()
-{
-    RELEASE_AND_CLEAR(_dsMonikerDevEnum);
-    RELEASE_AND_CLEAR(_dsDevEnum);
-    if (_CoUninitializeIsRequired)
-    {
-        CoUninitialize();
-    }
+DeviceInfoDS::~DeviceInfoDS() {
+  RELEASE_AND_CLEAR(_dsMonikerDevEnum);
+  RELEASE_AND_CLEAR(_dsDevEnum);
+  if (_CoUninitializeIsRequired) {
+    CoUninitialize();
+  }
 }
 
-int32_t DeviceInfoDS::Init()
+int32_t DeviceInfoDS::Init() {
+  HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC,
+                                IID_ICreateDevEnum, (void**)&_dsDevEnum);
+  if (hr != NOERROR) {
+    LOG(LS_INFO) << "Failed to create CLSID_SystemDeviceEnum, error 0x"
+                 << std::hex << hr;
+    return -1;
+  }
+  return 0;
+}
+uint32_t DeviceInfoDS::NumberOfDevices() {
+  ReadLockScoped cs(_apiLock);
+  return GetDeviceInfo(0, 0, 0, 0, 0, 0, 0);
+}
+
+int32_t DeviceInfoDS::GetDeviceName(uint32_t deviceNumber,
+                                    char* deviceNameUTF8,
+                                    uint32_t deviceNameLength,
+                                    char* deviceUniqueIdUTF8,
+                                    uint32_t deviceUniqueIdUTF8Length,
+                                    char* productUniqueIdUTF8,
+                                    uint32_t productUniqueIdUTF8Length) {
+  ReadLockScoped cs(_apiLock);
+  const int32_t result = GetDeviceInfo(
+      deviceNumber, deviceNameUTF8, deviceNameLength, deviceUniqueIdUTF8,
+      deviceUniqueIdUTF8Length, productUniqueIdUTF8, productUniqueIdUTF8Length);
+  return result > (int32_t)deviceNumber ? 0 : -1;
+}
+
+int32_t DeviceInfoDS::GetDeviceInfo(uint32_t deviceNumber,
+                                    char* deviceNameUTF8,
+                                    uint32_t deviceNameLength,
+                                    char* deviceUniqueIdUTF8,
+                                    uint32_t deviceUniqueIdUTF8Length,
+                                    char* productUniqueIdUTF8,
+                                    uint32_t productUniqueIdUTF8Length)
+
 {
-    HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC,
-                                  IID_ICreateDevEnum, (void **) &_dsDevEnum);
-    if (hr != NOERROR)
-    {
-        LOG(LS_INFO) << "Failed to create CLSID_SystemDeviceEnum, error 0x"
-                     << std::hex << hr;
-        return -1;
-    }
+  // enumerate all video capture devices
+  RELEASE_AND_CLEAR(_dsMonikerDevEnum);
+  HRESULT hr = _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
+                                                 &_dsMonikerDevEnum, 0);
+  if (hr != NOERROR) {
+    LOG(LS_INFO) << "Failed to enumerate CLSID_SystemDeviceEnum, error 0x"
+                 << std::hex << hr << ". No webcam exist?";
     return 0;
-}
-uint32_t DeviceInfoDS::NumberOfDevices()
-{
-    ReadLockScoped cs(_apiLock);
-    return GetDeviceInfo(0, 0, 0, 0, 0, 0, 0);
-}
+  }
 
-int32_t DeviceInfoDS::GetDeviceName(
-                                       uint32_t deviceNumber,
-                                       char* deviceNameUTF8,
-                                       uint32_t deviceNameLength,
-                                       char* deviceUniqueIdUTF8,
-                                       uint32_t deviceUniqueIdUTF8Length,
-                                       char* productUniqueIdUTF8,
-                                       uint32_t productUniqueIdUTF8Length)
-{
-    ReadLockScoped cs(_apiLock);
-    const int32_t result = GetDeviceInfo(deviceNumber, deviceNameUTF8,
-                                         deviceNameLength,
-                                         deviceUniqueIdUTF8,
-                                         deviceUniqueIdUTF8Length,
-                                         productUniqueIdUTF8,
-                                         productUniqueIdUTF8Length);
-    return result > (int32_t) deviceNumber ? 0 : -1;
-}
-
-int32_t DeviceInfoDS::GetDeviceInfo(
-                                       uint32_t deviceNumber,
-                                       char* deviceNameUTF8,
-                                       uint32_t deviceNameLength,
-                                       char* deviceUniqueIdUTF8,
-                                       uint32_t deviceUniqueIdUTF8Length,
-                                       char* productUniqueIdUTF8,
-                                       uint32_t productUniqueIdUTF8Length)
-
-{
-
-    // enumerate all video capture devices
-    RELEASE_AND_CLEAR(_dsMonikerDevEnum);
-    HRESULT hr =
-        _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
-                                          &_dsMonikerDevEnum, 0);
-    if (hr != NOERROR)
-    {
-        LOG(LS_INFO) << "Failed to enumerate CLSID_SystemDeviceEnum, error 0x"
-                     << std::hex << hr << ". No webcam exist?";
-        return 0;
-    }
-
-    _dsMonikerDevEnum->Reset();
-    ULONG cFetched;
-    IMoniker *pM;
-    int index = 0;
-    while (S_OK == _dsMonikerDevEnum->Next(1, &pM, &cFetched))
-    {
-        IPropertyBag *pBag;
-        hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void **) &pBag);
-        if (S_OK == hr)
-        {
-            // Find the description or friendly name.
-            VARIANT varName;
-            VariantInit(&varName);
-            hr = pBag->Read(L"Description", &varName, 0);
-            if (FAILED(hr))
-            {
-                hr = pBag->Read(L"FriendlyName", &varName, 0);
+  _dsMonikerDevEnum->Reset();
+  ULONG cFetched;
+  IMoniker* pM;
+  int index = 0;
+  while (S_OK == _dsMonikerDevEnum->Next(1, &pM, &cFetched)) {
+    IPropertyBag* pBag;
+    hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag);
+    if (S_OK == hr) {
+      // Find the description or friendly name.
+      VARIANT varName;
+      VariantInit(&varName);
+      hr = pBag->Read(L"Description", &varName, 0);
+      if (FAILED(hr)) {
+        hr = pBag->Read(L"FriendlyName", &varName, 0);
+      }
+      if (SUCCEEDED(hr)) {
+        // ignore all VFW drivers
+        if ((wcsstr(varName.bstrVal, (L"(VFW)")) == NULL) &&
+            (_wcsnicmp(varName.bstrVal, (L"Google Camera Adapter"), 21) != 0)) {
+          // Found a valid device.
+          if (index == static_cast<int>(deviceNumber)) {
+            int convResult = 0;
+            if (deviceNameLength > 0) {
+              convResult = WideCharToMultiByte(CP_UTF8, 0, varName.bstrVal, -1,
+                                               (char*)deviceNameUTF8,
+                                               deviceNameLength, NULL, NULL);
+              if (convResult == 0) {
+                LOG(LS_INFO) << "Failed to convert device name to UTF8, "
+                             << "error = " << GetLastError();
+                return -1;
+              }
             }
-            if (SUCCEEDED(hr))
-            {
-                // ignore all VFW drivers
-                if ((wcsstr(varName.bstrVal, (L"(VFW)")) == NULL) &&
-                    (_wcsnicmp(varName.bstrVal, (L"Google Camera Adapter"),21)
-                        != 0))
-                {
-                    // Found a valid device.
-                    if (index == static_cast<int>(deviceNumber))
-                    {
-                        int convResult = 0;
-                        if (deviceNameLength > 0)
-                        {
-                            convResult = WideCharToMultiByte(CP_UTF8, 0,
-                                                             varName.bstrVal, -1,
-                                                             (char*) deviceNameUTF8,
-                                                             deviceNameLength, NULL,
-                                                             NULL);
-                            if (convResult == 0)
-                            {
-                                LOG(LS_INFO)
-                                    << "Failed to convert device name to UTF8, "
-                                    << "error = " << GetLastError();
-                                return -1;
-                            }
-                        }
-                        if (deviceUniqueIdUTF8Length > 0)
-                        {
-                            hr = pBag->Read(L"DevicePath", &varName, 0);
-                            if (FAILED(hr))
-                            {
-                                strncpy_s((char *) deviceUniqueIdUTF8,
-                                          deviceUniqueIdUTF8Length,
-                                          (char *) deviceNameUTF8, convResult);
-                                LOG(LS_INFO) << "Failed to get "
-                                             << "deviceUniqueIdUTF8 using "
-                                             << "deviceNameUTF8";
-                            }
-                            else
-                            {
-                                convResult = WideCharToMultiByte(
-                                                          CP_UTF8,
-                                                          0,
-                                                          varName.bstrVal,
-                                                          -1,
-                                                          (char*) deviceUniqueIdUTF8,
-                                                          deviceUniqueIdUTF8Length,
-                                                          NULL, NULL);
-                                if (convResult == 0)
-                                {
-                                    LOG(LS_INFO) << "Failed to convert device "
-                                                 << "name to UTF8, error = "
-                                                 << GetLastError();
-                                    return -1;
-                                }
-                                if (productUniqueIdUTF8
-                                    && productUniqueIdUTF8Length > 0)
-                                {
-                                    GetProductId(deviceUniqueIdUTF8,
-                                                 productUniqueIdUTF8,
-                                                 productUniqueIdUTF8Length);
-                                }
-                            }
-                        }
-
-                    }
-                    ++index; // increase the number of valid devices
+            if (deviceUniqueIdUTF8Length > 0) {
+              hr = pBag->Read(L"DevicePath", &varName, 0);
+              if (FAILED(hr)) {
+                strncpy_s((char*)deviceUniqueIdUTF8, deviceUniqueIdUTF8Length,
+                          (char*)deviceNameUTF8, convResult);
+                LOG(LS_INFO) << "Failed to get "
+                             << "deviceUniqueIdUTF8 using "
+                             << "deviceNameUTF8";
+              } else {
+                convResult = WideCharToMultiByte(
+                    CP_UTF8, 0, varName.bstrVal, -1, (char*)deviceUniqueIdUTF8,
+                    deviceUniqueIdUTF8Length, NULL, NULL);
+                if (convResult == 0) {
+                  LOG(LS_INFO) << "Failed to convert device "
+                               << "name to UTF8, error = " << GetLastError();
+                  return -1;
                 }
+                if (productUniqueIdUTF8 && productUniqueIdUTF8Length > 0) {
+                  GetProductId(deviceUniqueIdUTF8, productUniqueIdUTF8,
+                               productUniqueIdUTF8Length);
+                }
+              }
             }
-            VariantClear(&varName);
-            pBag->Release();
-            pM->Release();
+          }
+          ++index;  // increase the number of valid devices
         }
-
+      }
+      VariantClear(&varName);
+      pBag->Release();
+      pM->Release();
     }
-    if (deviceNameLength)
-    {
-        LOG(LS_INFO) << __FUNCTION__ << " " << deviceNameUTF8;
-    }
-    return index;
+  }
+  if (deviceNameLength) {
+    LOG(LS_INFO) << __FUNCTION__ << " " << deviceNameUTF8;
+  }
+  return index;
 }
 
-IBaseFilter * DeviceInfoDS::GetDeviceFilter(
-                                     const char* deviceUniqueIdUTF8,
-                                     char* productUniqueIdUTF8,
-                                     uint32_t productUniqueIdUTF8Length)
-{
+IBaseFilter* DeviceInfoDS::GetDeviceFilter(const char* deviceUniqueIdUTF8,
+                                           char* productUniqueIdUTF8,
+                                           uint32_t productUniqueIdUTF8Length) {
+  const int32_t deviceUniqueIdUTF8Length = (int32_t)strlen(
+      (char*)deviceUniqueIdUTF8);  // UTF8 is also NULL terminated
+  if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) {
+    LOG(LS_INFO) << "Device name too long";
+    return NULL;
+  }
 
-    const int32_t deviceUniqueIdUTF8Length =
-        (int32_t) strlen((char*) deviceUniqueIdUTF8); // UTF8 is also NULL terminated
-    if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength)
-    {
-        LOG(LS_INFO) << "Device name too long";
-        return NULL;
-    }
+  // enumerate all video capture devices
+  RELEASE_AND_CLEAR(_dsMonikerDevEnum);
+  HRESULT hr = _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
+                                                 &_dsMonikerDevEnum, 0);
+  if (hr != NOERROR) {
+    LOG(LS_INFO) << "Failed to enumerate CLSID_SystemDeviceEnum, error 0x"
+                 << std::hex << hr << ". No webcam exist?";
+    return 0;
+  }
+  _dsMonikerDevEnum->Reset();
+  ULONG cFetched;
+  IMoniker* pM;
 
-    // enumerate all video capture devices
-    RELEASE_AND_CLEAR(_dsMonikerDevEnum);
-    HRESULT hr = _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
-                                                   &_dsMonikerDevEnum, 0);
-    if (hr != NOERROR)
-    {
-        LOG(LS_INFO) << "Failed to enumerate CLSID_SystemDeviceEnum, error 0x"
-                     << std::hex << hr << ". No webcam exist?";
-        return 0;
-    }
-    _dsMonikerDevEnum->Reset();
-    ULONG cFetched;
-    IMoniker *pM;
-
-    IBaseFilter *captureFilter = NULL;
-    bool deviceFound = false;
-    while (S_OK == _dsMonikerDevEnum->Next(1, &pM, &cFetched) && !deviceFound)
-    {
-        IPropertyBag *pBag;
-        hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void **) &pBag);
-        if (S_OK == hr)
-        {
-            // Find the description or friendly name.
-            VARIANT varName;
-            VariantInit(&varName);
-            if (deviceUniqueIdUTF8Length > 0)
-            {
-                hr = pBag->Read(L"DevicePath", &varName, 0);
-                if (FAILED(hr))
-                {
-                    hr = pBag->Read(L"Description", &varName, 0);
-                    if (FAILED(hr))
-                    {
-                        hr = pBag->Read(L"FriendlyName", &varName, 0);
-                    }
-                }
-                if (SUCCEEDED(hr))
-                {
-                    char tempDevicePathUTF8[256];
-                    tempDevicePathUTF8[0] = 0;
-                    WideCharToMultiByte(CP_UTF8, 0, varName.bstrVal, -1,
-                                        tempDevicePathUTF8,
-                                        sizeof(tempDevicePathUTF8), NULL,
-                                        NULL);
-                    if (strncmp(tempDevicePathUTF8,
-                                (const char*) deviceUniqueIdUTF8,
-                                deviceUniqueIdUTF8Length) == 0)
-                    {
-                        // We have found the requested device
-                        deviceFound = true;
-                        hr = pM->BindToObject(0, 0, IID_IBaseFilter,
-                                              (void**) &captureFilter);
-                        if FAILED(hr)
-                        {
-                            LOG(LS_ERROR) << "Failed to bind to the selected "
-                                          << "capture device " << hr;
-                        }
-
-                        if (productUniqueIdUTF8
-                            && productUniqueIdUTF8Length > 0) // Get the device name
-                        {
-
-                            GetProductId(deviceUniqueIdUTF8,
-                                         productUniqueIdUTF8,
-                                         productUniqueIdUTF8Length);
-                        }
-
-                    }
-                }
-            }
-            VariantClear(&varName);
-            pBag->Release();
-            pM->Release();
+  IBaseFilter* captureFilter = NULL;
+  bool deviceFound = false;
+  while (S_OK == _dsMonikerDevEnum->Next(1, &pM, &cFetched) && !deviceFound) {
+    IPropertyBag* pBag;
+    hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag);
+    if (S_OK == hr) {
+      // Find the description or friendly name.
+      VARIANT varName;
+      VariantInit(&varName);
+      if (deviceUniqueIdUTF8Length > 0) {
+        hr = pBag->Read(L"DevicePath", &varName, 0);
+        if (FAILED(hr)) {
+          hr = pBag->Read(L"Description", &varName, 0);
+          if (FAILED(hr)) {
+            hr = pBag->Read(L"FriendlyName", &varName, 0);
+          }
         }
+        if (SUCCEEDED(hr)) {
+          char tempDevicePathUTF8[256];
+          tempDevicePathUTF8[0] = 0;
+          WideCharToMultiByte(CP_UTF8, 0, varName.bstrVal, -1,
+                              tempDevicePathUTF8, sizeof(tempDevicePathUTF8),
+                              NULL, NULL);
+          if (strncmp(tempDevicePathUTF8, (const char*)deviceUniqueIdUTF8,
+                      deviceUniqueIdUTF8Length) == 0) {
+            // We have found the requested device
+            deviceFound = true;
+            hr =
+                pM->BindToObject(0, 0, IID_IBaseFilter, (void**)&captureFilter);
+            if
+              FAILED(hr) {
+                LOG(LS_ERROR) << "Failed to bind to the selected "
+                              << "capture device " << hr;
+              }
+
+            if (productUniqueIdUTF8 &&
+                productUniqueIdUTF8Length > 0)  // Get the device name
+            {
+              GetProductId(deviceUniqueIdUTF8, productUniqueIdUTF8,
+                           productUniqueIdUTF8Length);
+            }
+          }
+        }
+      }
+      VariantClear(&varName);
+      pBag->Release();
+      pM->Release();
     }
-    return captureFilter;
+  }
+  return captureFilter;
 }
 
 int32_t DeviceInfoDS::GetWindowsCapability(
@@ -355,397 +295,342 @@
   return 0;
 }
 
-int32_t DeviceInfoDS::CreateCapabilityMap(
-                                         const char* deviceUniqueIdUTF8)
+int32_t DeviceInfoDS::CreateCapabilityMap(const char* deviceUniqueIdUTF8)
 
 {
-    // Reset old capability list
-    _captureCapabilities.clear();
+  // Reset old capability list
+  _captureCapabilities.clear();
 
-    const int32_t deviceUniqueIdUTF8Length =
-        (int32_t) strlen((char*) deviceUniqueIdUTF8);
-    if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength)
-    {
-        LOG(LS_INFO) << "Device name too long";
-        return -1;
-    }
-    LOG(LS_INFO) << "CreateCapabilityMap called for device "
-                 << deviceUniqueIdUTF8;
+  const int32_t deviceUniqueIdUTF8Length =
+      (int32_t)strlen((char*)deviceUniqueIdUTF8);
+  if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) {
+    LOG(LS_INFO) << "Device name too long";
+    return -1;
+  }
+  LOG(LS_INFO) << "CreateCapabilityMap called for device "
+               << deviceUniqueIdUTF8;
 
+  char productId[kVideoCaptureProductIdLength];
+  IBaseFilter* captureDevice = DeviceInfoDS::GetDeviceFilter(
+      deviceUniqueIdUTF8, productId, kVideoCaptureProductIdLength);
+  if (!captureDevice)
+    return -1;
+  IPin* outputCapturePin = GetOutputPin(captureDevice, GUID_NULL);
+  if (!outputCapturePin) {
+    LOG(LS_INFO) << "Failed to get capture device output pin";
+    RELEASE_AND_CLEAR(captureDevice);
+    return -1;
+  }
+  IAMExtDevice* extDevice = NULL;
+  HRESULT hr =
+      captureDevice->QueryInterface(IID_IAMExtDevice, (void**)&extDevice);
+  if (SUCCEEDED(hr) && extDevice) {
+    LOG(LS_INFO) << "This is an external device";
+    extDevice->Release();
+  }
 
-    char productId[kVideoCaptureProductIdLength];
-    IBaseFilter* captureDevice = DeviceInfoDS::GetDeviceFilter(
-                                               deviceUniqueIdUTF8,
-                                               productId,
-                                               kVideoCaptureProductIdLength);
-    if (!captureDevice)
-        return -1;
-    IPin* outputCapturePin = GetOutputPin(captureDevice, GUID_NULL);
-    if (!outputCapturePin)
-    {
-        LOG(LS_INFO) << "Failed to get capture device output pin";
-        RELEASE_AND_CLEAR(captureDevice);
-        return -1;
-    }
-    IAMExtDevice* extDevice = NULL;
-    HRESULT hr = captureDevice->QueryInterface(IID_IAMExtDevice,
-                                               (void **) &extDevice);
-    if (SUCCEEDED(hr) && extDevice)
-    {
-        LOG(LS_INFO) << "This is an external device";
-        extDevice->Release();
-    }
+  IAMStreamConfig* streamConfig = NULL;
+  hr = outputCapturePin->QueryInterface(IID_IAMStreamConfig,
+                                        (void**)&streamConfig);
+  if (FAILED(hr)) {
+    LOG(LS_INFO) << "Failed to get IID_IAMStreamConfig interface "
+                 << "from capture device";
+    return -1;
+  }
 
-    IAMStreamConfig* streamConfig = NULL;
-    hr = outputCapturePin->QueryInterface(IID_IAMStreamConfig,
-                                          (void**) &streamConfig);
-    if (FAILED(hr))
-    {
-        LOG(LS_INFO) << "Failed to get IID_IAMStreamConfig interface "
-                     <<"from capture device";
-        return -1;
-    }
+  // this  gets the FPS
+  IAMVideoControl* videoControlConfig = NULL;
+  HRESULT hrVC = captureDevice->QueryInterface(IID_IAMVideoControl,
+                                               (void**)&videoControlConfig);
+  if (FAILED(hrVC)) {
+    LOG(LS_INFO) << "IID_IAMVideoControl Interface NOT SUPPORTED";
+  }
 
-    // this  gets the FPS
-    IAMVideoControl* videoControlConfig = NULL;
-    HRESULT hrVC = captureDevice->QueryInterface(IID_IAMVideoControl,
-                                      (void**) &videoControlConfig);
-    if (FAILED(hrVC))
-    {
-        LOG(LS_INFO) << "IID_IAMVideoControl Interface NOT SUPPORTED";
-    }
+  AM_MEDIA_TYPE* pmt = NULL;
+  VIDEO_STREAM_CONFIG_CAPS caps;
+  int count, size;
 
-    AM_MEDIA_TYPE *pmt = NULL;
-    VIDEO_STREAM_CONFIG_CAPS caps;
-    int count, size;
-
-    hr = streamConfig->GetNumberOfCapabilities(&count, &size);
-    if (FAILED(hr))
-    {
-        LOG(LS_INFO) << "Failed to GetNumberOfCapabilities";
-        RELEASE_AND_CLEAR(videoControlConfig);
-        RELEASE_AND_CLEAR(streamConfig);
-        RELEASE_AND_CLEAR(outputCapturePin);
-        RELEASE_AND_CLEAR(captureDevice);
-        return -1;
-    }
-
-    // Check if the device support formattype == FORMAT_VideoInfo2 and FORMAT_VideoInfo.
-    // Prefer FORMAT_VideoInfo since some cameras (ZureCam) has been seen having problem with MJPEG and FORMAT_VideoInfo2
-    // Interlace flag is only supported in FORMAT_VideoInfo2
-    bool supportFORMAT_VideoInfo2 = false;
-    bool supportFORMAT_VideoInfo = false;
-    bool foundInterlacedFormat = false;
-    GUID preferedVideoFormat = FORMAT_VideoInfo;
-    for (int32_t tmp = 0; tmp < count; ++tmp)
-    {
-        hr = streamConfig->GetStreamCaps(tmp, &pmt,
-                                         reinterpret_cast<BYTE*> (&caps));
-        if (!FAILED(hr))
-        {
-            if (pmt->majortype == MEDIATYPE_Video
-                && pmt->formattype == FORMAT_VideoInfo2)
-            {
-                LOG(LS_INFO) << "Device support FORMAT_VideoInfo2";
-                supportFORMAT_VideoInfo2 = true;
-                VIDEOINFOHEADER2* h =
-                    reinterpret_cast<VIDEOINFOHEADER2*> (pmt->pbFormat);
-                assert(h);
-                foundInterlacedFormat |= h->dwInterlaceFlags
-                                        & (AMINTERLACE_IsInterlaced
-                                           | AMINTERLACE_DisplayModeBobOnly);
-            }
-            if (pmt->majortype == MEDIATYPE_Video
-                && pmt->formattype == FORMAT_VideoInfo)
-            {
-                LOG(LS_INFO) << "Device support FORMAT_VideoInfo2";
-                supportFORMAT_VideoInfo = true;
-            }
-        }
-    }
-    if (supportFORMAT_VideoInfo2)
-    {
-        if (supportFORMAT_VideoInfo && !foundInterlacedFormat)
-        {
-            preferedVideoFormat = FORMAT_VideoInfo;
-        }
-        else
-        {
-            preferedVideoFormat = FORMAT_VideoInfo2;
-        }
-    }
-
-    for (int32_t tmp = 0; tmp < count; ++tmp)
-    {
-        hr = streamConfig->GetStreamCaps(tmp, &pmt,
-                                         reinterpret_cast<BYTE*> (&caps));
-        if (FAILED(hr))
-        {
-            LOG(LS_INFO) << "Failed to GetStreamCaps";
-            RELEASE_AND_CLEAR(videoControlConfig);
-            RELEASE_AND_CLEAR(streamConfig);
-            RELEASE_AND_CLEAR(outputCapturePin);
-            RELEASE_AND_CLEAR(captureDevice);
-            return -1;
-        }
-
-        if (pmt->majortype == MEDIATYPE_Video
-            && pmt->formattype == preferedVideoFormat)
-        {
-
-            VideoCaptureCapabilityWindows capability;
-            int64_t avgTimePerFrame = 0;
-
-            if (pmt->formattype == FORMAT_VideoInfo)
-            {
-                VIDEOINFOHEADER* h =
-                    reinterpret_cast<VIDEOINFOHEADER*> (pmt->pbFormat);
-                assert(h);
-                capability.directShowCapabilityIndex = tmp;
-                capability.width = h->bmiHeader.biWidth;
-                capability.height = h->bmiHeader.biHeight;
-                avgTimePerFrame = h->AvgTimePerFrame;
-            }
-            if (pmt->formattype == FORMAT_VideoInfo2)
-            {
-                VIDEOINFOHEADER2* h =
-                    reinterpret_cast<VIDEOINFOHEADER2*> (pmt->pbFormat);
-                assert(h);
-                capability.directShowCapabilityIndex = tmp;
-                capability.width = h->bmiHeader.biWidth;
-                capability.height = h->bmiHeader.biHeight;
-                capability.interlaced = h->dwInterlaceFlags
-                                        & (AMINTERLACE_IsInterlaced
-                                           | AMINTERLACE_DisplayModeBobOnly);
-                avgTimePerFrame = h->AvgTimePerFrame;
-            }
-
-            if (hrVC == S_OK)
-            {
-                LONGLONG *frameDurationList;
-                LONGLONG maxFPS;
-                long listSize;
-                SIZE size;
-                size.cx = capability.width;
-                size.cy = capability.height;
-
-                // GetMaxAvailableFrameRate doesn't return max frame rate always
-                // eg: Logitech Notebook. This may be due to a bug in that API
-                // because GetFrameRateList array is reversed in the above camera. So
-                // a util method written. Can't assume the first value will return
-                // the max fps.
-                hrVC = videoControlConfig->GetFrameRateList(outputCapturePin,
-                                                            tmp, size,
-                                                            &listSize,
-                                                            &frameDurationList);
-
-                // On some odd cameras, you may get a 0 for duration.
-                // GetMaxOfFrameArray returns the lowest duration (highest FPS)
-                if (hrVC == S_OK && listSize > 0 &&
-                    0 != (maxFPS = GetMaxOfFrameArray(frameDurationList,
-                                                      listSize)))
-                {
-                    capability.maxFPS = static_cast<int> (10000000
-                                                           / maxFPS);
-                    capability.supportFrameRateControl = true;
-                }
-                else // use existing method
-                {
-                    LOG(LS_INFO) << "GetMaxAvailableFrameRate NOT SUPPORTED";
-                    if (avgTimePerFrame > 0)
-                        capability.maxFPS = static_cast<int> (10000000
-                                                               / avgTimePerFrame);
-                    else
-                        capability.maxFPS = 0;
-                }
-            }
-            else // use existing method in case IAMVideoControl is not supported
-            {
-                if (avgTimePerFrame > 0)
-                    capability.maxFPS = static_cast<int> (10000000
-                                                           / avgTimePerFrame);
-                else
-                    capability.maxFPS = 0;
-            }
-
-            // can't switch MEDIATYPE :~(
-            if (pmt->subtype == MEDIASUBTYPE_I420)
-            {
-              capability.videoType = VideoType::kI420;
-            }
-            else if (pmt->subtype == MEDIASUBTYPE_IYUV)
-            {
-              capability.videoType = VideoType::kIYUV;
-            }
-            else if (pmt->subtype == MEDIASUBTYPE_RGB24)
-            {
-              capability.videoType = VideoType::kRGB24;
-            }
-            else if (pmt->subtype == MEDIASUBTYPE_YUY2)
-            {
-              capability.videoType = VideoType::kYUY2;
-            }
-            else if (pmt->subtype == MEDIASUBTYPE_RGB565)
-            {
-              capability.videoType = VideoType::kRGB565;
-            }
-            else if (pmt->subtype == MEDIASUBTYPE_MJPG)
-            {
-              capability.videoType = VideoType::kMJPEG;
-            }
-            else if (pmt->subtype == MEDIASUBTYPE_dvsl
-                    || pmt->subtype == MEDIASUBTYPE_dvsd
-                    || pmt->subtype == MEDIASUBTYPE_dvhd) // If this is an external DV camera
-            {
-              capability.videoType =
-                  VideoType::kYUY2;  // MS DV filter seems to create this type
-            }
-            else if (pmt->subtype == MEDIASUBTYPE_UYVY) // Seen used by Declink capture cards
-            {
-              capability.videoType = VideoType::kUYVY;
-            }
-            else if (pmt->subtype == MEDIASUBTYPE_HDYC) // Seen used by Declink capture cards. Uses BT. 709 color. Not entiry correct to use UYVY. http://en.wikipedia.org/wiki/YCbCr
-            {
-                LOG(LS_INFO) << "Device support HDYC.";
-                capability.videoType = VideoType::kUYVY;
-            }
-            else
-            {
-                WCHAR strGuid[39];
-                StringFromGUID2(pmt->subtype, strGuid, 39);
-                LOG(LS_WARNING) << "Device support unknown media type "
-                                << strGuid << ", width " << capability.width
-                                << ", height " << capability.height;
-                continue;
-            }
-
-            _captureCapabilities.push_back(capability);
-            _captureCapabilitiesWindows.push_back(capability);
-            LOG(LS_INFO) << "Camera capability, width:" << capability.width
-                         << " height:" << capability.height << " type:"
-                         << static_cast<int>(capability.videoType) << " fps:"
-                         << capability.maxFPS;
-        }
-        DeleteMediaType(pmt);
-        pmt = NULL;
-    }
-    RELEASE_AND_CLEAR(streamConfig);
+  hr = streamConfig->GetNumberOfCapabilities(&count, &size);
+  if (FAILED(hr)) {
+    LOG(LS_INFO) << "Failed to GetNumberOfCapabilities";
     RELEASE_AND_CLEAR(videoControlConfig);
+    RELEASE_AND_CLEAR(streamConfig);
     RELEASE_AND_CLEAR(outputCapturePin);
-    RELEASE_AND_CLEAR(captureDevice); // Release the capture device
+    RELEASE_AND_CLEAR(captureDevice);
+    return -1;
+  }
 
-    // Store the new used device name
-    _lastUsedDeviceNameLength = deviceUniqueIdUTF8Length;
-    _lastUsedDeviceName = (char*) realloc(_lastUsedDeviceName,
-                                                   _lastUsedDeviceNameLength
-                                                       + 1);
-    memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8, _lastUsedDeviceNameLength+ 1);
-    LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
+  // Check if the device supports formattype == FORMAT_VideoInfo2 and
+  // FORMAT_VideoInfo. Prefer FORMAT_VideoInfo since some cameras (ZureCam)
+  // have been seen having problems with MJPEG and FORMAT_VideoInfo2.
+  // Interlace flag is only supported in FORMAT_VideoInfo2.
+  bool supportFORMAT_VideoInfo2 = false;
+  bool supportFORMAT_VideoInfo = false;
+  bool foundInterlacedFormat = false;
+  GUID preferedVideoFormat = FORMAT_VideoInfo;
+  for (int32_t tmp = 0; tmp < count; ++tmp) {
+    hr = streamConfig->GetStreamCaps(tmp, &pmt, reinterpret_cast<BYTE*>(&caps));
+    if (!FAILED(hr)) {
+      if (pmt->majortype == MEDIATYPE_Video &&
+          pmt->formattype == FORMAT_VideoInfo2) {
+        LOG(LS_INFO) << "Device support FORMAT_VideoInfo2";
+        supportFORMAT_VideoInfo2 = true;
+        VIDEOINFOHEADER2* h =
+            reinterpret_cast<VIDEOINFOHEADER2*>(pmt->pbFormat);
+        assert(h);
+        foundInterlacedFormat |=
+            h->dwInterlaceFlags &
+            (AMINTERLACE_IsInterlaced | AMINTERLACE_DisplayModeBobOnly);
+      }
+      if (pmt->majortype == MEDIATYPE_Video &&
+          pmt->formattype == FORMAT_VideoInfo) {
+        LOG(LS_INFO) << "Device support FORMAT_VideoInfo";
+        supportFORMAT_VideoInfo = true;
+      }
+    }
+  }
+  if (supportFORMAT_VideoInfo2) {
+    if (supportFORMAT_VideoInfo && !foundInterlacedFormat) {
+      preferedVideoFormat = FORMAT_VideoInfo;
+    } else {
+      preferedVideoFormat = FORMAT_VideoInfo2;
+    }
+  }
 
-    return static_cast<int32_t>(_captureCapabilities.size());
+  for (int32_t tmp = 0; tmp < count; ++tmp) {
+    hr = streamConfig->GetStreamCaps(tmp, &pmt, reinterpret_cast<BYTE*>(&caps));
+    if (FAILED(hr)) {
+      LOG(LS_INFO) << "Failed to GetStreamCaps";
+      RELEASE_AND_CLEAR(videoControlConfig);
+      RELEASE_AND_CLEAR(streamConfig);
+      RELEASE_AND_CLEAR(outputCapturePin);
+      RELEASE_AND_CLEAR(captureDevice);
+      return -1;
+    }
+
+    if (pmt->majortype == MEDIATYPE_Video &&
+        pmt->formattype == preferedVideoFormat) {
+      VideoCaptureCapabilityWindows capability;
+      int64_t avgTimePerFrame = 0;
+
+      if (pmt->formattype == FORMAT_VideoInfo) {
+        VIDEOINFOHEADER* h = reinterpret_cast<VIDEOINFOHEADER*>(pmt->pbFormat);
+        assert(h);
+        capability.directShowCapabilityIndex = tmp;
+        capability.width = h->bmiHeader.biWidth;
+        capability.height = h->bmiHeader.biHeight;
+        avgTimePerFrame = h->AvgTimePerFrame;
+      }
+      if (pmt->formattype == FORMAT_VideoInfo2) {
+        VIDEOINFOHEADER2* h =
+            reinterpret_cast<VIDEOINFOHEADER2*>(pmt->pbFormat);
+        assert(h);
+        capability.directShowCapabilityIndex = tmp;
+        capability.width = h->bmiHeader.biWidth;
+        capability.height = h->bmiHeader.biHeight;
+        capability.interlaced =
+            h->dwInterlaceFlags &
+            (AMINTERLACE_IsInterlaced | AMINTERLACE_DisplayModeBobOnly);
+        avgTimePerFrame = h->AvgTimePerFrame;
+      }
+
+      if (hrVC == S_OK) {
+        LONGLONG* frameDurationList;
+        LONGLONG maxFPS;
+        long listSize;
+        SIZE size;
+        size.cx = capability.width;
+        size.cy = capability.height;
+
+        // GetMaxAvailableFrameRate doesn't return max frame rate always
+        // eg: Logitech Notebook. This may be due to a bug in that API
+        // because GetFrameRateList array is reversed in the above camera. So
+        // a util method written. Can't assume the first value will return
+        // the max fps.
+        hrVC = videoControlConfig->GetFrameRateList(
+            outputCapturePin, tmp, size, &listSize, &frameDurationList);
+
+        // On some odd cameras, you may get a 0 for duration.
+        // GetMaxOfFrameArray returns the lowest duration (highest FPS)
+        if (hrVC == S_OK && listSize > 0 &&
+            0 != (maxFPS = GetMaxOfFrameArray(frameDurationList, listSize))) {
+          capability.maxFPS = static_cast<int>(10000000 / maxFPS);
+          capability.supportFrameRateControl = true;
+        } else  // use existing method
+        {
+          LOG(LS_INFO) << "GetMaxAvailableFrameRate NOT SUPPORTED";
+          if (avgTimePerFrame > 0)
+            capability.maxFPS = static_cast<int>(10000000 / avgTimePerFrame);
+          else
+            capability.maxFPS = 0;
+        }
+      } else  // use existing method in case IAMVideoControl is not supported
+      {
+        if (avgTimePerFrame > 0)
+          capability.maxFPS = static_cast<int>(10000000 / avgTimePerFrame);
+        else
+          capability.maxFPS = 0;
+      }
+
+      // can't switch MEDIATYPE :~(
+      if (pmt->subtype == MEDIASUBTYPE_I420) {
+        capability.videoType = VideoType::kI420;
+      } else if (pmt->subtype == MEDIASUBTYPE_IYUV) {
+        capability.videoType = VideoType::kIYUV;
+      } else if (pmt->subtype == MEDIASUBTYPE_RGB24) {
+        capability.videoType = VideoType::kRGB24;
+      } else if (pmt->subtype == MEDIASUBTYPE_YUY2) {
+        capability.videoType = VideoType::kYUY2;
+      } else if (pmt->subtype == MEDIASUBTYPE_RGB565) {
+        capability.videoType = VideoType::kRGB565;
+      } else if (pmt->subtype == MEDIASUBTYPE_MJPG) {
+        capability.videoType = VideoType::kMJPEG;
+      } else if (pmt->subtype == MEDIASUBTYPE_dvsl ||
+                 pmt->subtype == MEDIASUBTYPE_dvsd ||
+                 pmt->subtype ==
+                     MEDIASUBTYPE_dvhd)  // If this is an external DV camera
+      {
+        capability.videoType =
+            VideoType::kYUY2;  // MS DV filter seems to create this type
+      } else if (pmt->subtype ==
+                 MEDIASUBTYPE_UYVY)  // Seen used by Decklink capture cards
+      {
+        capability.videoType = VideoType::kUYVY;
+      } else if (pmt->subtype ==
+                 MEDIASUBTYPE_HDYC)  // Seen used by Decklink capture cards.
+                                     // Uses BT. 709 color. Not entirely correct
+                                     // to use UYVY. http://en.wikipedia.org/wiki/YCbCr
+      {
+        LOG(LS_INFO) << "Device support HDYC.";
+        capability.videoType = VideoType::kUYVY;
+      } else {
+        WCHAR strGuid[39];
+        StringFromGUID2(pmt->subtype, strGuid, 39);
+        LOG(LS_WARNING) << "Device support unknown media type " << strGuid
+                        << ", width " << capability.width << ", height "
+                        << capability.height;
+        continue;
+      }
+
+      _captureCapabilities.push_back(capability);
+      _captureCapabilitiesWindows.push_back(capability);
+      LOG(LS_INFO) << "Camera capability, width:" << capability.width
+                   << " height:" << capability.height
+                   << " type:" << static_cast<int>(capability.videoType)
+                   << " fps:" << capability.maxFPS;
+    }
+    DeleteMediaType(pmt);
+    pmt = NULL;
+  }
+  RELEASE_AND_CLEAR(streamConfig);
+  RELEASE_AND_CLEAR(videoControlConfig);
+  RELEASE_AND_CLEAR(outputCapturePin);
+  RELEASE_AND_CLEAR(captureDevice);  // Release the capture device
+
+  // Store the new used device name
+  _lastUsedDeviceNameLength = deviceUniqueIdUTF8Length;
+  _lastUsedDeviceName =
+      (char*)realloc(_lastUsedDeviceName, _lastUsedDeviceNameLength + 1);
+  memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8,
+         _lastUsedDeviceNameLength + 1);
+  LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
+
+  return static_cast<int32_t>(_captureCapabilities.size());
 }
 
-/* Constructs a product ID from the Windows DevicePath. on a USB device the devicePath contains product id and vendor id.
- This seems to work for firewire as well
+/* Constructs a product ID from the Windows DevicePath. on a USB device the
+ devicePath contains product id and vendor id. This seems to work for firewire
+ as well
  /* Example of device path
  "\\?\usb#vid_0408&pid_2010&mi_00#7&258e7aaf&0&0000#{65e8773d-8f56-11d0-a3b9-00a0c9223196}\global"
  "\\?\avc#sony&dv-vcr&camcorder&dv#65b2d50301460008#{65e8773d-8f56-11d0-a3b9-00a0c9223196}\global"
  */
 void DeviceInfoDS::GetProductId(const char* devicePath,
-                                      char* productUniqueIdUTF8,
-                                      uint32_t productUniqueIdUTF8Length)
-{
-    *productUniqueIdUTF8 = '\0';
-    char* startPos = strstr((char*) devicePath, "\\\\?\\");
-    if (!startPos)
-    {
-        strncpy_s((char*) productUniqueIdUTF8, productUniqueIdUTF8Length, "", 1);
-        LOG(LS_INFO) << "Failed to get the product Id";
-        return;
-    }
-    startPos += 4;
+                                char* productUniqueIdUTF8,
+                                uint32_t productUniqueIdUTF8Length) {
+  *productUniqueIdUTF8 = '\0';
+  char* startPos = strstr((char*)devicePath, "\\\\?\\");
+  if (!startPos) {
+    strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length, "", 1);
+    LOG(LS_INFO) << "Failed to get the product Id";
+    return;
+  }
+  startPos += 4;
 
-    char* pos = strchr(startPos, '&');
-    if (!pos || pos >= (char*) devicePath + strlen((char*) devicePath))
-    {
-        strncpy_s((char*) productUniqueIdUTF8, productUniqueIdUTF8Length, "", 1);
-        LOG(LS_INFO) << "Failed to get the product Id";
-        return;
-    }
-    // Find the second occurrence.
-    pos = strchr(pos + 1, '&');
-    uint32_t bytesToCopy = (uint32_t)(pos - startPos);
-    if (pos && (bytesToCopy <= productUniqueIdUTF8Length) && bytesToCopy
-        <= kVideoCaptureProductIdLength)
-    {
-        strncpy_s((char*) productUniqueIdUTF8, productUniqueIdUTF8Length,
-                  (char*) startPos, bytesToCopy);
-    }
-    else
-    {
-        strncpy_s((char*) productUniqueIdUTF8, productUniqueIdUTF8Length, "", 1);
-        LOG(LS_INFO) << "Failed to get the product Id";
-    }
+  char* pos = strchr(startPos, '&');
+  if (!pos || pos >= (char*)devicePath + strlen((char*)devicePath)) {
+    strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length, "", 1);
+    LOG(LS_INFO) << "Failed to get the product Id";
+    return;
+  }
+  // Find the second occurrence.
+  pos = strchr(pos + 1, '&');
+  uint32_t bytesToCopy = (uint32_t)(pos - startPos);
+  if (pos && (bytesToCopy <= productUniqueIdUTF8Length) &&
+      bytesToCopy <= kVideoCaptureProductIdLength) {
+    strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length,
+              (char*)startPos, bytesToCopy);
+  } else {
+    strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length, "", 1);
+    LOG(LS_INFO) << "Failed to get the product Id";
+  }
 }
 
 int32_t DeviceInfoDS::DisplayCaptureSettingsDialogBox(
-                                         const char* deviceUniqueIdUTF8,
-                                         const char* dialogTitleUTF8,
-                                         void* parentWindow,
-                                         uint32_t positionX,
-                                         uint32_t positionY)
-{
-    ReadLockScoped cs(_apiLock);
-    HWND window = (HWND) parentWindow;
+    const char* deviceUniqueIdUTF8,
+    const char* dialogTitleUTF8,
+    void* parentWindow,
+    uint32_t positionX,
+    uint32_t positionY) {
+  ReadLockScoped cs(_apiLock);
+  HWND window = (HWND)parentWindow;
 
-    IBaseFilter* filter = GetDeviceFilter(deviceUniqueIdUTF8, NULL, 0);
-    if (!filter)
-        return -1;
+  IBaseFilter* filter = GetDeviceFilter(deviceUniqueIdUTF8, NULL, 0);
+  if (!filter)
+    return -1;
 
-    ISpecifyPropertyPages* pPages = NULL;
-    CAUUID uuid;
-    HRESULT hr = S_OK;
+  ISpecifyPropertyPages* pPages = NULL;
+  CAUUID uuid;
+  HRESULT hr = S_OK;
 
-    hr = filter->QueryInterface(IID_ISpecifyPropertyPages, (LPVOID*) &pPages);
-    if (!SUCCEEDED(hr))
-    {
-        filter->Release();
-        return -1;
-    }
-    hr = pPages->GetPages(&uuid);
-    if (!SUCCEEDED(hr))
-    {
-        filter->Release();
-        return -1;
-    }
-
-    WCHAR tempDialogTitleWide[256];
-    tempDialogTitleWide[0] = 0;
-    int size = 255;
-
-    // UTF-8 to wide char
-    MultiByteToWideChar(CP_UTF8, 0, (char*) dialogTitleUTF8, -1,
-                        tempDialogTitleWide, size);
-
-    // Invoke a dialog box to display.
-
-    hr = OleCreatePropertyFrame(window, // You must create the parent window.
-                                positionX, // Horizontal position for the dialog box.
-                                positionY, // Vertical position for the dialog box.
-                                tempDialogTitleWide,// String used for the dialog box caption.
-                                1, // Number of pointers passed in pPlugin.
-                                (LPUNKNOWN*) &filter, // Pointer to the filter.
-                                uuid.cElems, // Number of property pages.
-                                uuid.pElems, // Array of property page CLSIDs.
-                                LOCALE_USER_DEFAULT, // Locale ID for the dialog box.
-                                0, NULL); // Reserved
-    // Release memory.
-    if (uuid.pElems)
-    {
-        CoTaskMemFree(uuid.pElems);
-    }
+  hr = filter->QueryInterface(IID_ISpecifyPropertyPages, (LPVOID*)&pPages);
+  if (!SUCCEEDED(hr)) {
     filter->Release();
-    return 0;
+    return -1;
+  }
+  hr = pPages->GetPages(&uuid);
+  if (!SUCCEEDED(hr)) {
+    filter->Release();
+    return -1;
+  }
+
+  WCHAR tempDialogTitleWide[256];
+  tempDialogTitleWide[0] = 0;
+  int size = 255;
+
+  // UTF-8 to wide char
+  MultiByteToWideChar(CP_UTF8, 0, (char*)dialogTitleUTF8, -1,
+                      tempDialogTitleWide, size);
+
+  // Invoke a dialog box to display.
+
+  hr = OleCreatePropertyFrame(
+      window,               // You must create the parent window.
+      positionX,            // Horizontal position for the dialog box.
+      positionY,            // Vertical position for the dialog box.
+      tempDialogTitleWide,  // String used for the dialog box caption.
+      1,                    // Number of pointers passed in pPlugin.
+      (LPUNKNOWN*)&filter,  // Pointer to the filter.
+      uuid.cElems,          // Number of property pages.
+      uuid.pElems,          // Array of property page CLSIDs.
+      LOCALE_USER_DEFAULT,  // Locale ID for the dialog box.
+      0, NULL);             // Reserved
+  // Release memory.
+  if (uuid.pElems) {
+    CoTaskMemFree(uuid.pElems);
+  }
+  filter->Release();
+  return 0;
 }
 }  // namespace videocapturemodule
 }  // namespace webrtc
diff --git a/modules/video_capture/windows/sink_filter_ds.cc b/modules/video_capture/windows/sink_filter_ds.cc
index 1b2138a..f982fce 100644
--- a/modules/video_capture/windows/sink_filter_ds.cc
+++ b/modules/video_capture/windows/sink_filter_ds.cc
@@ -17,495 +17,429 @@
 #include "rtc_base/logging.h"
 #include "rtc_base/platform_thread.h"
 
-#include <Dvdmedia.h> // VIDEOINFOHEADER2
+#include <Dvdmedia.h>  // VIDEOINFOHEADER2
 #include <initguid.h>
 
-#define DELETE_RESET(p) { delete (p) ; (p) = NULL ;}
+#define DELETE_RESET(p) \
+  {                     \
+    delete (p);         \
+    (p) = NULL;         \
+  }
 
-DEFINE_GUID(CLSID_SINKFILTER, 0x88cdbbdc, 0xa73b, 0x4afa, 0xac, 0xbf, 0x15, 0xd5,
-            0xe2, 0xce, 0x12, 0xc3);
+DEFINE_GUID(CLSID_SINKFILTER,
+            0x88cdbbdc,
+            0xa73b,
+            0x4afa,
+            0xac,
+            0xbf,
+            0x15,
+            0xd5,
+            0xe2,
+            0xce,
+            0x12,
+            0xc3);
 
-namespace webrtc
-{
-namespace videocapturemodule
-{
+namespace webrtc {
+namespace videocapturemodule {
 
-typedef struct tagTHREADNAME_INFO
-{
-   DWORD dwType;        // must be 0x1000
-   LPCSTR szName;       // pointer to name (in user addr space)
-   DWORD dwThreadID;    // thread ID (-1=caller thread)
-   DWORD dwFlags;       // reserved for future use, must be zero
+typedef struct tagTHREADNAME_INFO {
+  DWORD dwType;      // must be 0x1000
+  LPCSTR szName;     // pointer to name (in user addr space)
+  DWORD dwThreadID;  // thread ID (-1=caller thread)
+  DWORD dwFlags;     // reserved for future use, must be zero
 } THREADNAME_INFO;
 
-CaptureInputPin::CaptureInputPin (IN TCHAR * szName,
-                            IN CaptureSinkFilter* pFilter,
-                            IN CCritSec * pLock,
-                            OUT HRESULT * pHr,
-                            IN LPCWSTR pszName)
-    : CBaseInputPin (szName, pFilter, pLock, pHr, pszName),
+CaptureInputPin::CaptureInputPin(IN TCHAR* szName,
+                                 IN CaptureSinkFilter* pFilter,
+                                 IN CCritSec* pLock,
+                                 OUT HRESULT* pHr,
+                                 IN LPCWSTR pszName)
+    : CBaseInputPin(szName, pFilter, pLock, pHr, pszName),
       _requestedCapability(),
-      _resultingCapability()
-{
-    _threadHandle = NULL;
+      _resultingCapability() {
+  _threadHandle = NULL;
 }
 
-CaptureInputPin::~CaptureInputPin()
-{
+CaptureInputPin::~CaptureInputPin() {}
+
+HRESULT
+CaptureInputPin::GetMediaType(IN int iPosition, OUT CMediaType* pmt) {
+  // reset the thread handle
+  _threadHandle = NULL;
+
+  if (iPosition < 0)
+    return E_INVALIDARG;
+
+  VIDEOINFOHEADER* pvi =
+      (VIDEOINFOHEADER*)pmt->AllocFormatBuffer(sizeof(VIDEOINFOHEADER));
+  if (NULL == pvi) {
+    LOG(LS_INFO) << "CheckMediaType VIDEOINFOHEADER is NULL. Returning.";
+    return (E_OUTOFMEMORY);
+  }
+
+  ZeroMemory(pvi, sizeof(VIDEOINFOHEADER));
+  pvi->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
+  pvi->bmiHeader.biPlanes = 1;
+  pvi->bmiHeader.biClrImportant = 0;
+  pvi->bmiHeader.biClrUsed = 0;
+  if (_requestedCapability.maxFPS != 0) {
+    pvi->AvgTimePerFrame = 10000000 / _requestedCapability.maxFPS;
+  }
+
+  SetRectEmpty(&(pvi->rcSource));  // we want the whole image area rendered.
+  SetRectEmpty(&(pvi->rcTarget));  // no particular destination rectangle
+
+  pmt->SetType(&MEDIATYPE_Video);
+  pmt->SetFormatType(&FORMAT_VideoInfo);
+  pmt->SetTemporalCompression(FALSE);
+
+  int32_t positionOffset = 1;
+  switch (iPosition + positionOffset) {
+    case 0: {
+      pvi->bmiHeader.biCompression = MAKEFOURCC('I', '4', '2', '0');
+      pvi->bmiHeader.biBitCount = 12;  // bit per pixel
+      pvi->bmiHeader.biWidth = _requestedCapability.width;
+      pvi->bmiHeader.biHeight = _requestedCapability.height;
+      pvi->bmiHeader.biSizeImage =
+          3 * _requestedCapability.height * _requestedCapability.width / 2;
+      pmt->SetSubtype(&MEDIASUBTYPE_I420);
+    } break;
+    case 1: {
+      pvi->bmiHeader.biCompression = MAKEFOURCC('Y', 'U', 'Y', '2');
+      ;
+      pvi->bmiHeader.biBitCount = 16;  // bit per pixel
+      pvi->bmiHeader.biWidth = _requestedCapability.width;
+      pvi->bmiHeader.biHeight = _requestedCapability.height;
+      pvi->bmiHeader.biSizeImage =
+          2 * _requestedCapability.width * _requestedCapability.height;
+      pmt->SetSubtype(&MEDIASUBTYPE_YUY2);
+    } break;
+    case 2: {
+      pvi->bmiHeader.biCompression = BI_RGB;
+      pvi->bmiHeader.biBitCount = 24;  // bit per pixel
+      pvi->bmiHeader.biWidth = _requestedCapability.width;
+      pvi->bmiHeader.biHeight = _requestedCapability.height;
+      pvi->bmiHeader.biSizeImage =
+          3 * _requestedCapability.height * _requestedCapability.width;
+      pmt->SetSubtype(&MEDIASUBTYPE_RGB24);
+    } break;
+    case 3: {
+      pvi->bmiHeader.biCompression = MAKEFOURCC('U', 'Y', 'V', 'Y');
+      pvi->bmiHeader.biBitCount = 16;  // bit per pixel
+      pvi->bmiHeader.biWidth = _requestedCapability.width;
+      pvi->bmiHeader.biHeight = _requestedCapability.height;
+      pvi->bmiHeader.biSizeImage =
+          2 * _requestedCapability.height * _requestedCapability.width;
+      pmt->SetSubtype(&MEDIASUBTYPE_UYVY);
+    } break;
+    case 4: {
+      pvi->bmiHeader.biCompression = MAKEFOURCC('M', 'J', 'P', 'G');
+      pvi->bmiHeader.biBitCount = 12;  // bit per pixel
+      pvi->bmiHeader.biWidth = _requestedCapability.width;
+      pvi->bmiHeader.biHeight = _requestedCapability.height;
+      pvi->bmiHeader.biSizeImage =
+          3 * _requestedCapability.height * _requestedCapability.width / 2;
+      pmt->SetSubtype(&MEDIASUBTYPE_MJPG);
+    } break;
+    default:
+      return VFW_S_NO_MORE_ITEMS;
+  }
+  pmt->SetSampleSize(pvi->bmiHeader.biSizeImage);
+  LOG(LS_INFO) << "GetMediaType position " << iPosition << ", width "
+               << _requestedCapability.width << ", height "
+               << _requestedCapability.height << ", biCompression 0x"
+               << std::hex << pvi->bmiHeader.biCompression;
+  return NOERROR;
 }
 
 HRESULT
-CaptureInputPin::GetMediaType (IN int iPosition, OUT CMediaType * pmt)
-{
-    // reset the thread handle
-    _threadHandle = NULL;
+CaptureInputPin::CheckMediaType(IN const CMediaType* pMediaType) {
+  // reset the thread handle
+  _threadHandle = NULL;
 
-    if(iPosition < 0)
+  const GUID* type = pMediaType->Type();
+  if (*type != MEDIATYPE_Video)
     return E_INVALIDARG;
 
-    VIDEOINFOHEADER* pvi = (VIDEOINFOHEADER*) pmt->AllocFormatBuffer(
-                            sizeof(VIDEOINFOHEADER));
-    if(NULL == pvi)
-    {
-        LOG(LS_INFO) << "CheckMediaType VIDEOINFOHEADER is NULL. Returning.";
-        return(E_OUTOFMEMORY);
+  const GUID* formatType = pMediaType->FormatType();
+
+  // Check for the subtypes we support
+  const GUID* SubType = pMediaType->Subtype();
+  if (SubType == NULL) {
+    return E_INVALIDARG;
+  }
+
+  if (*formatType == FORMAT_VideoInfo) {
+    VIDEOINFOHEADER* pvi = (VIDEOINFOHEADER*)pMediaType->Format();
+    if (pvi == NULL) {
+      return E_INVALIDARG;
     }
 
-    ZeroMemory(pvi, sizeof(VIDEOINFOHEADER));
-    pvi->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
-    pvi->bmiHeader.biPlanes = 1;
-    pvi->bmiHeader.biClrImportant = 0;
-    pvi->bmiHeader.biClrUsed = 0;
-    if (_requestedCapability.maxFPS != 0) {
-        pvi->AvgTimePerFrame = 10000000/_requestedCapability.maxFPS;
+    // Store the incoming width and height
+    _resultingCapability.width = pvi->bmiHeader.biWidth;
+
+    // Store the incoming height,
+    // for RGB24 we assume the frame to be upside down
+    if (*SubType == MEDIASUBTYPE_RGB24 && pvi->bmiHeader.biHeight > 0) {
+      _resultingCapability.height = -(pvi->bmiHeader.biHeight);
+    } else {
+      _resultingCapability.height = abs(pvi->bmiHeader.biHeight);
     }
 
-    SetRectEmpty(&(pvi->rcSource)); // we want the whole image area rendered.
-    SetRectEmpty(&(pvi->rcTarget)); // no particular destination rectangle
-
-    pmt->SetType(&MEDIATYPE_Video);
-    pmt->SetFormatType(&FORMAT_VideoInfo);
-    pmt->SetTemporalCompression(FALSE);
-
-    int32_t positionOffset=1;
-    switch (iPosition+positionOffset)
-    {
-        case 0:
-        {
-            pvi->bmiHeader.biCompression = MAKEFOURCC('I','4','2','0');
-            pvi->bmiHeader.biBitCount = 12; //bit per pixel
-            pvi->bmiHeader.biWidth = _requestedCapability.width;
-            pvi->bmiHeader.biHeight = _requestedCapability.height;
-            pvi->bmiHeader.biSizeImage = 3*_requestedCapability.height
-                                        *_requestedCapability.width/2;
-            pmt->SetSubtype(&MEDIASUBTYPE_I420);
-        }
-        break;
-        case 1:
-        {
-            pvi->bmiHeader.biCompression = MAKEFOURCC('Y','U','Y','2');;
-            pvi->bmiHeader.biBitCount = 16; //bit per pixel
-            pvi->bmiHeader.biWidth = _requestedCapability.width;
-            pvi->bmiHeader.biHeight = _requestedCapability.height;
-            pvi->bmiHeader.biSizeImage = 2*_requestedCapability.width
-                                        *_requestedCapability.height;
-            pmt->SetSubtype(&MEDIASUBTYPE_YUY2);
-        }
-        break;
-        case 2:
-        {
-            pvi->bmiHeader.biCompression = BI_RGB;
-            pvi->bmiHeader.biBitCount = 24; //bit per pixel
-            pvi->bmiHeader.biWidth = _requestedCapability.width;
-            pvi->bmiHeader.biHeight = _requestedCapability.height;
-            pvi->bmiHeader.biSizeImage = 3*_requestedCapability.height
-                                        *_requestedCapability.width;
-            pmt->SetSubtype(&MEDIASUBTYPE_RGB24);
-        }
-        break;
-        case 3:
-        {
-            pvi->bmiHeader.biCompression = MAKEFOURCC('U','Y','V','Y');
-            pvi->bmiHeader.biBitCount = 16; //bit per pixel
-            pvi->bmiHeader.biWidth = _requestedCapability.width;
-            pvi->bmiHeader.biHeight = _requestedCapability.height;
-            pvi->bmiHeader.biSizeImage = 2*_requestedCapability.height
-                                         *_requestedCapability.width;
-            pmt->SetSubtype(&MEDIASUBTYPE_UYVY);
-        }
-        break;
-        case 4:
-        {
-            pvi->bmiHeader.biCompression = MAKEFOURCC('M','J','P','G');
-            pvi->bmiHeader.biBitCount = 12; //bit per pixel
-            pvi->bmiHeader.biWidth = _requestedCapability.width;
-            pvi->bmiHeader.biHeight = _requestedCapability.height;
-            pvi->bmiHeader.biSizeImage = 3*_requestedCapability.height
-                                         *_requestedCapability.width/2;
-            pmt->SetSubtype(&MEDIASUBTYPE_MJPG);
-        }
-        break;
-        default :
-        return VFW_S_NO_MORE_ITEMS;
-    }
-    pmt->SetSampleSize(pvi->bmiHeader.biSizeImage);
-    LOG(LS_INFO) << "GetMediaType position " << iPosition << ", width "
-                 << _requestedCapability.width << ", height "
-                 << _requestedCapability.height << ", biCompression 0x"
+    LOG(LS_INFO) << "CheckMediaType width:" << pvi->bmiHeader.biWidth
+                 << " height:" << pvi->bmiHeader.biHeight << " Compression:0x"
                  << std::hex << pvi->bmiHeader.biCompression;
-    return NOERROR;
+
+    if (*SubType == MEDIASUBTYPE_MJPG &&
+        pvi->bmiHeader.biCompression == MAKEFOURCC('M', 'J', 'P', 'G')) {
+      _resultingCapability.videoType = VideoType::kMJPEG;
+      return S_OK;  // This format is acceptable.
+    }
+    if (*SubType == MEDIASUBTYPE_I420 &&
+        pvi->bmiHeader.biCompression == MAKEFOURCC('I', '4', '2', '0')) {
+      _resultingCapability.videoType = VideoType::kI420;
+      return S_OK;  // This format is acceptable.
+    }
+    if (*SubType == MEDIASUBTYPE_YUY2 &&
+        pvi->bmiHeader.biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
+      _resultingCapability.videoType = VideoType::kYUY2;
+      ::Sleep(60);  // workaround for bad driver
+      return S_OK;  // This format is acceptable.
+    }
+    if (*SubType == MEDIASUBTYPE_UYVY &&
+        pvi->bmiHeader.biCompression == MAKEFOURCC('U', 'Y', 'V', 'Y')) {
+      _resultingCapability.videoType = VideoType::kUYVY;
+      return S_OK;  // This format is acceptable.
+    }
+
+    if (*SubType == MEDIASUBTYPE_HDYC) {
+      _resultingCapability.videoType = VideoType::kUYVY;
+      return S_OK;  // This format is acceptable.
+    }
+    if (*SubType == MEDIASUBTYPE_RGB24 &&
+        pvi->bmiHeader.biCompression == BI_RGB) {
+      _resultingCapability.videoType = VideoType::kRGB24;
+      return S_OK;  // This format is acceptable.
+    }
+  }
+  if (*formatType == FORMAT_VideoInfo2) {
+    // VIDEOINFOHEADER2 that has dwInterlaceFlags
+    VIDEOINFOHEADER2* pvi = (VIDEOINFOHEADER2*)pMediaType->Format();
+
+    if (pvi == NULL) {
+      return E_INVALIDARG;
+    }
+
+    LOG(LS_INFO) << "CheckMediaType width:" << pvi->bmiHeader.biWidth
+                 << " height:" << pvi->bmiHeader.biHeight << " Compression:0x"
+                 << std::hex << pvi->bmiHeader.biCompression;
+
+    _resultingCapability.width = pvi->bmiHeader.biWidth;
+
+    // Store the incoming height,
+    // for RGB24 we assume the frame to be upside down
+    if (*SubType == MEDIASUBTYPE_RGB24 && pvi->bmiHeader.biHeight > 0) {
+      _resultingCapability.height = -(pvi->bmiHeader.biHeight);
+    } else {
+      _resultingCapability.height = abs(pvi->bmiHeader.biHeight);
+    }
+
+    if (*SubType == MEDIASUBTYPE_MJPG &&
+        pvi->bmiHeader.biCompression == MAKEFOURCC('M', 'J', 'P', 'G')) {
+      _resultingCapability.videoType = VideoType::kMJPEG;
+      return S_OK;  // This format is acceptable.
+    }
+    if (*SubType == MEDIASUBTYPE_I420 &&
+        pvi->bmiHeader.biCompression == MAKEFOURCC('I', '4', '2', '0')) {
+      _resultingCapability.videoType = VideoType::kI420;
+      return S_OK;  // This format is acceptable.
+    }
+    if (*SubType == MEDIASUBTYPE_YUY2 &&
+        pvi->bmiHeader.biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
+      _resultingCapability.videoType = VideoType::kYUY2;
+      return S_OK;  // This format is acceptable.
+    }
+    if (*SubType == MEDIASUBTYPE_UYVY &&
+        pvi->bmiHeader.biCompression == MAKEFOURCC('U', 'Y', 'V', 'Y')) {
+      _resultingCapability.videoType = VideoType::kUYVY;
+      return S_OK;  // This format is acceptable.
+    }
+
+    if (*SubType == MEDIASUBTYPE_HDYC) {
+      _resultingCapability.videoType = VideoType::kUYVY;
+      return S_OK;  // This format is acceptable.
+    }
+    if (*SubType == MEDIASUBTYPE_RGB24 &&
+        pvi->bmiHeader.biCompression == BI_RGB) {
+      _resultingCapability.videoType = VideoType::kRGB24;
+      return S_OK;  // This format is acceptable.
+    }
+  }
+  return E_INVALIDARG;
 }
 
 HRESULT
-CaptureInputPin::CheckMediaType ( IN const CMediaType * pMediaType)
-{
-    // reset the thread handle
-    _threadHandle = NULL;
+CaptureInputPin::Receive(IN IMediaSample* pIMediaSample) {
+  HRESULT hr = S_OK;
 
-    const GUID *type = pMediaType->Type();
-    if (*type != MEDIATYPE_Video)
-    return E_INVALIDARG;
+  RTC_DCHECK(m_pFilter);
+  RTC_DCHECK(pIMediaSample);
 
-    const GUID *formatType = pMediaType->FormatType();
+  // get the thread handle of the delivering thread inc its priority
+  if (_threadHandle == NULL) {
+    HANDLE handle = GetCurrentThread();
+    SetThreadPriority(handle, THREAD_PRIORITY_HIGHEST);
+    _threadHandle = handle;
 
-    // Check for the subtypes we support
-    const GUID *SubType = pMediaType->Subtype();
-    if (SubType == NULL)
-    {
-        return E_INVALIDARG;
+    rtc::SetCurrentThreadName("webrtc_video_capture");
+  }
+
+  reinterpret_cast<CaptureSinkFilter*>(m_pFilter)->LockReceive();
+  hr = CBaseInputPin::Receive(pIMediaSample);
+
+  if (SUCCEEDED(hr)) {
+    const LONG length = pIMediaSample->GetActualDataLength();
+    RTC_DCHECK(length >= 0);
+
+    unsigned char* pBuffer = NULL;
+    if (S_OK != pIMediaSample->GetPointer(&pBuffer)) {
+      reinterpret_cast<CaptureSinkFilter*>(m_pFilter)->UnlockReceive();
+      return S_FALSE;
     }
 
-    if(*formatType == FORMAT_VideoInfo)
-    {
-        VIDEOINFOHEADER *pvi = (VIDEOINFOHEADER *) pMediaType->Format();
-        if(pvi == NULL)
-        {
-            return E_INVALIDARG;
-        }
+    // NOTE: filter unlocked within Send call
+    reinterpret_cast<CaptureSinkFilter*>(m_pFilter)->ProcessCapturedFrame(
+        pBuffer, static_cast<size_t>(length), _resultingCapability);
+  } else {
+    reinterpret_cast<CaptureSinkFilter*>(m_pFilter)->UnlockReceive();
+  }
 
-        // Store the incoming width and height
-        _resultingCapability.width = pvi->bmiHeader.biWidth;
-
-        // Store the incoming height,
-        // for RGB24 we assume the frame to be upside down
-        if(*SubType == MEDIASUBTYPE_RGB24
-            && pvi->bmiHeader.biHeight > 0)
-        {
-           _resultingCapability.height = -(pvi->bmiHeader.biHeight);
-        }
-        else
-        {
-           _resultingCapability.height = abs(pvi->bmiHeader.biHeight);
-        }
-
-        LOG(LS_INFO) << "CheckMediaType width:" << pvi->bmiHeader.biWidth
-                     << " height:" << pvi->bmiHeader.biHeight
-                     << " Compression:0x" << std::hex
-                     << pvi->bmiHeader.biCompression;
-
-        if(*SubType == MEDIASUBTYPE_MJPG
-            && pvi->bmiHeader.biCompression == MAKEFOURCC('M','J','P','G'))
-        {
-          _resultingCapability.videoType = VideoType::kMJPEG;
-          return S_OK;  // This format is acceptable.
-        }
-        if(*SubType == MEDIASUBTYPE_I420
-            && pvi->bmiHeader.biCompression == MAKEFOURCC('I','4','2','0'))
-        {
-          _resultingCapability.videoType = VideoType::kI420;
-          return S_OK;  // This format is acceptable.
-        }
-        if(*SubType == MEDIASUBTYPE_YUY2
-            && pvi->bmiHeader.biCompression == MAKEFOURCC('Y','U','Y','2'))
-        {
-          _resultingCapability.videoType = VideoType::kYUY2;
-          ::Sleep(60);  // workaround for bad driver
-          return S_OK;  // This format is acceptable.
-        }
-        if(*SubType == MEDIASUBTYPE_UYVY
-            && pvi->bmiHeader.biCompression == MAKEFOURCC('U','Y','V','Y'))
-        {
-          _resultingCapability.videoType = VideoType::kUYVY;
-          return S_OK;  // This format is acceptable.
-        }
-
-        if(*SubType == MEDIASUBTYPE_HDYC)
-        {
-          _resultingCapability.videoType = VideoType::kUYVY;
-          return S_OK;  // This format is acceptable.
-        }
-        if(*SubType == MEDIASUBTYPE_RGB24
-            && pvi->bmiHeader.biCompression == BI_RGB)
-        {
-          _resultingCapability.videoType = VideoType::kRGB24;
-          return S_OK;  // This format is acceptable.
-        }
-    }
-    if(*formatType == FORMAT_VideoInfo2)
-    {
-        // VIDEOINFOHEADER2 that has dwInterlaceFlags
-        VIDEOINFOHEADER2 *pvi = (VIDEOINFOHEADER2 *) pMediaType->Format();
-
-        if(pvi == NULL)
-        {
-            return E_INVALIDARG;
-        }
-
-        LOG(LS_INFO) << "CheckMediaType width:" << pvi->bmiHeader.biWidth
-                     << " height:" << pvi->bmiHeader.biHeight
-                     << " Compression:0x" << std::hex
-                     << pvi->bmiHeader.biCompression;
-
-        _resultingCapability.width = pvi->bmiHeader.biWidth;
-
-        // Store the incoming height,
-        // for RGB24 we assume the frame to be upside down
-        if(*SubType == MEDIASUBTYPE_RGB24
-            && pvi->bmiHeader.biHeight > 0)
-        {
-           _resultingCapability.height = -(pvi->bmiHeader.biHeight);
-        }
-        else
-        {
-           _resultingCapability.height = abs(pvi->bmiHeader.biHeight);
-        }
-
-        if(*SubType == MEDIASUBTYPE_MJPG
-            && pvi->bmiHeader.biCompression == MAKEFOURCC('M','J','P','G'))
-        {
-          _resultingCapability.videoType = VideoType::kMJPEG;
-          return S_OK;  // This format is acceptable.
-        }
-        if(*SubType == MEDIASUBTYPE_I420
-            && pvi->bmiHeader.biCompression == MAKEFOURCC('I','4','2','0'))
-        {
-          _resultingCapability.videoType = VideoType::kI420;
-          return S_OK;  // This format is acceptable.
-        }
-        if(*SubType == MEDIASUBTYPE_YUY2
-            && pvi->bmiHeader.biCompression == MAKEFOURCC('Y','U','Y','2'))
-        {
-          _resultingCapability.videoType = VideoType::kYUY2;
-          return S_OK;  // This format is acceptable.
-        }
-        if(*SubType == MEDIASUBTYPE_UYVY
-            && pvi->bmiHeader.biCompression == MAKEFOURCC('U','Y','V','Y'))
-        {
-          _resultingCapability.videoType = VideoType::kUYVY;
-          return S_OK;  // This format is acceptable.
-        }
-
-        if(*SubType == MEDIASUBTYPE_HDYC)
-        {
-          _resultingCapability.videoType = VideoType::kUYVY;
-          return S_OK;  // This format is acceptable.
-        }
-        if(*SubType == MEDIASUBTYPE_RGB24
-            && pvi->bmiHeader.biCompression == BI_RGB)
-        {
-          _resultingCapability.videoType = VideoType::kRGB24;
-          return S_OK;  // This format is acceptable.
-        }
-    }
-    return E_INVALIDARG;
-}
-
-HRESULT
-CaptureInputPin::Receive ( IN IMediaSample * pIMediaSample )
-{
-    HRESULT hr = S_OK;
-
-    RTC_DCHECK(m_pFilter);
-    RTC_DCHECK(pIMediaSample);
-
-    // get the thread handle of the delivering thread inc its priority
-    if( _threadHandle == NULL)
-    {
-        HANDLE handle= GetCurrentThread();
-        SetThreadPriority(handle, THREAD_PRIORITY_HIGHEST);
-        _threadHandle = handle;
-
-        rtc::SetCurrentThreadName("webrtc_video_capture");
-    }
-
-    reinterpret_cast <CaptureSinkFilter *>(m_pFilter)->LockReceive();
-    hr = CBaseInputPin::Receive (pIMediaSample);
-
-    if (SUCCEEDED (hr))
-    {
-        const LONG length = pIMediaSample->GetActualDataLength();
-        RTC_DCHECK(length >= 0);
-
-        unsigned char* pBuffer = NULL;
-        if(S_OK != pIMediaSample->GetPointer(&pBuffer))
-        {
-            reinterpret_cast <CaptureSinkFilter *>(m_pFilter)->UnlockReceive();
-            return S_FALSE;
-        }
-
-        // NOTE: filter unlocked within Send call
-        reinterpret_cast <CaptureSinkFilter *> (m_pFilter)->ProcessCapturedFrame(
-            pBuffer, static_cast<size_t>(length), _resultingCapability);
-    }
-    else
-    {
-        reinterpret_cast <CaptureSinkFilter *>(m_pFilter)->UnlockReceive();
-    }
-
-    return hr;
+  return hr;
 }
 
 // called under LockReceive
 HRESULT CaptureInputPin::SetMatchingMediaType(
-                                    const VideoCaptureCapability& capability)
-{
-
-    _requestedCapability = capability;
-    _resultingCapability = VideoCaptureCapability();
-    return S_OK;
+    const VideoCaptureCapability& capability) {
+  _requestedCapability = capability;
+  _resultingCapability = VideoCaptureCapability();
+  return S_OK;
 }
 //  ----------------------------------------------------------------------------
-CaptureSinkFilter::CaptureSinkFilter (IN TCHAR * tszName,
-                              IN LPUNKNOWN punk,
-                              OUT HRESULT * phr,
-                              VideoCaptureExternal& captureObserver)
-    : CBaseFilter(tszName,punk,& m_crtFilter,CLSID_SINKFILTER),
+CaptureSinkFilter::CaptureSinkFilter(IN TCHAR* tszName,
+                                     IN LPUNKNOWN punk,
+                                     OUT HRESULT* phr,
+                                     VideoCaptureExternal& captureObserver)
+    : CBaseFilter(tszName, punk, &m_crtFilter, CLSID_SINKFILTER),
       m_pInput(NULL),
-      _captureObserver(captureObserver)
-{
-    (* phr) = S_OK;
-    m_pInput = new CaptureInputPin(NAME ("VideoCaptureInputPin"),
-                                   this,
-                                   & m_crtFilter,
-                                   phr, L"VideoCapture");
-    if (m_pInput == NULL || FAILED (* phr))
-    {
-        (* phr) = FAILED (* phr) ? (* phr) : E_OUTOFMEMORY;
-        goto cleanup;
+      _captureObserver(captureObserver) {
+  (*phr) = S_OK;
+  m_pInput = new CaptureInputPin(NAME("VideoCaptureInputPin"), this,
+                                 &m_crtFilter, phr, L"VideoCapture");
+  if (m_pInput == NULL || FAILED(*phr)) {
+    (*phr) = FAILED(*phr) ? (*phr) : E_OUTOFMEMORY;
+    goto cleanup;
+  }
+cleanup:
+  return;
+}
+
+CaptureSinkFilter::~CaptureSinkFilter() {
+  delete m_pInput;
+}
+
+int CaptureSinkFilter::GetPinCount() {
+  return 1;
+}
+
+CBasePin* CaptureSinkFilter::GetPin(IN int Index) {
+  CBasePin* pPin;
+  LockFilter();
+  if (Index == 0) {
+    pPin = m_pInput;
+  } else {
+    pPin = NULL;
+  }
+  UnlockFilter();
+  return pPin;
+}
+
+STDMETHODIMP CaptureSinkFilter::Pause() {
+  LockReceive();
+  LockFilter();
+  if (m_State == State_Stopped) {
+    //  change the state, THEN activate the input pin
+    m_State = State_Paused;
+    if (m_pInput && m_pInput->IsConnected()) {
+      m_pInput->Active();
     }
-    cleanup :
-    return;
-}
-
-CaptureSinkFilter::~CaptureSinkFilter()
-{
-    delete m_pInput;
-}
-
-int CaptureSinkFilter::GetPinCount()
-{
-    return 1;
-}
-
-CBasePin *
-CaptureSinkFilter::GetPin(IN int Index)
-{
-    CBasePin * pPin;
-    LockFilter ();
-    if (Index == 0)
-    {
-        pPin = m_pInput;
+    if (m_pInput && !m_pInput->IsConnected()) {
+      m_State = State_Running;
     }
-    else
-    {
-        pPin = NULL;
-    }
-    UnlockFilter ();
-    return pPin;
+  } else if (m_State == State_Running) {
+    m_State = State_Paused;
+  }
+  UnlockFilter();
+  UnlockReceive();
+  return S_OK;
 }
 
-STDMETHODIMP CaptureSinkFilter::Pause()
-{
-    LockReceive();
-    LockFilter();
-    if (m_State == State_Stopped)
-    {
-        //  change the state, THEN activate the input pin
-        m_State = State_Paused;
-        if (m_pInput && m_pInput->IsConnected())
-        {
-            m_pInput->Active();
-        }
-        if (m_pInput && !m_pInput->IsConnected())
-        {
-            m_State = State_Running;
-        }
-    }
-    else if (m_State == State_Running)
-    {
-        m_State = State_Paused;
-    }
-    UnlockFilter();
-    UnlockReceive();
-    return S_OK;
+STDMETHODIMP CaptureSinkFilter::Stop() {
+  LockReceive();
+  LockFilter();
+
+  //  set the state
+  m_State = State_Stopped;
+
+  //  inactivate the pins
+  if (m_pInput)
+    m_pInput->Inactive();
+
+  UnlockFilter();
+  UnlockReceive();
+  return S_OK;
 }
 
-STDMETHODIMP CaptureSinkFilter::Stop()
-{
-    LockReceive();
-    LockFilter();
-
-    //  set the state
-    m_State = State_Stopped;
-
-    //  inactivate the pins
-    if (m_pInput)
-        m_pInput->Inactive();
-
-    UnlockFilter();
-    UnlockReceive();
-    return S_OK;
-}
-
-void CaptureSinkFilter::SetFilterGraph(IGraphBuilder* graph)
-{
-    LockFilter();
-    m_pGraph = graph;
-    UnlockFilter();
+void CaptureSinkFilter::SetFilterGraph(IGraphBuilder* graph) {
+  LockFilter();
+  m_pGraph = graph;
+  UnlockFilter();
 }
 
 void CaptureSinkFilter::ProcessCapturedFrame(
     unsigned char* pBuffer,
     size_t length,
-    const VideoCaptureCapability& frameInfo)
-{
-    //  we have the receiver lock
-    if (m_State == State_Running)
-    {
-        _captureObserver.IncomingFrame(pBuffer, length, frameInfo);
+    const VideoCaptureCapability& frameInfo) {
+  //  we have the receiver lock
+  if (m_State == State_Running) {
+    _captureObserver.IncomingFrame(pBuffer, length, frameInfo);
 
-        // trying to hold it since it's only a memcpy
-        // IMPROVEMENT if this work move critsect
-        UnlockReceive();
-        return;
-    }
+    // trying to hold it since it's only a memcpy
+    // IMPROVEMENT if this work move critsect
     UnlockReceive();
     return;
+  }
+  UnlockReceive();
+  return;
 }
 
 STDMETHODIMP CaptureSinkFilter::SetMatchingMediaType(
-                                        const VideoCaptureCapability& capability)
-{
-    LockReceive();
-    LockFilter();
-    HRESULT hr;
-    if (m_pInput)
-    {
-        hr = m_pInput->SetMatchingMediaType(capability);
-    }
-    else
-    {
-        hr = E_UNEXPECTED;
-    }
-    UnlockFilter();
-    UnlockReceive();
-    return hr;
+    const VideoCaptureCapability& capability) {
+  LockReceive();
+  LockFilter();
+  HRESULT hr;
+  if (m_pInput) {
+    hr = m_pInput->SetMatchingMediaType(capability);
+  } else {
+    hr = E_UNEXPECTED;
+  }
+  UnlockFilter();
+  UnlockReceive();
+  return hr;
 }
 
-STDMETHODIMP CaptureSinkFilter::GetClassID( OUT CLSID * pCLSID )
-{
-    (* pCLSID) = CLSID_SINKFILTER;
-    return S_OK;
+STDMETHODIMP CaptureSinkFilter::GetClassID(OUT CLSID* pCLSID) {
+  (*pCLSID) = CLSID_SINKFILTER;
+  return S_OK;
 }
 
 }  // namespace videocapturemodule
diff --git a/modules/video_capture/windows/video_capture_ds.cc b/modules/video_capture/windows/video_capture_ds.cc
index a9f06f7..aece107 100644
--- a/modules/video_capture/windows/video_capture_ds.cc
+++ b/modules/video_capture/windows/video_capture_ds.cc
@@ -15,378 +15,304 @@
 #include "modules/video_capture/windows/sink_filter_ds.h"
 #include "rtc_base/logging.h"
 
-#include <Dvdmedia.h> // VIDEOINFOHEADER2
+#include <Dvdmedia.h>  // VIDEOINFOHEADER2
 
-namespace webrtc
-{
-namespace videocapturemodule
-{
+namespace webrtc {
+namespace videocapturemodule {
 VideoCaptureDS::VideoCaptureDS()
     : _captureFilter(NULL),
-      _graphBuilder(NULL), _mediaControl(NULL), _sinkFilter(NULL),
-      _inputSendPin(NULL), _outputCapturePin(NULL), _dvFilter(NULL),
-      _inputDvPin(NULL), _outputDvPin(NULL)
-{
+      _graphBuilder(NULL),
+      _mediaControl(NULL),
+      _sinkFilter(NULL),
+      _inputSendPin(NULL),
+      _outputCapturePin(NULL),
+      _dvFilter(NULL),
+      _inputDvPin(NULL),
+      _outputDvPin(NULL) {}
+
+VideoCaptureDS::~VideoCaptureDS() {
+  if (_mediaControl) {
+    _mediaControl->Stop();
+  }
+  if (_graphBuilder) {
+    if (_sinkFilter)
+      _graphBuilder->RemoveFilter(_sinkFilter);
+    if (_captureFilter)
+      _graphBuilder->RemoveFilter(_captureFilter);
+    if (_dvFilter)
+      _graphBuilder->RemoveFilter(_dvFilter);
+  }
+  RELEASE_AND_CLEAR(_inputSendPin);
+  RELEASE_AND_CLEAR(_outputCapturePin);
+
+  RELEASE_AND_CLEAR(_captureFilter);  // release the capture device
+  RELEASE_AND_CLEAR(_sinkFilter);
+  RELEASE_AND_CLEAR(_dvFilter);
+
+  RELEASE_AND_CLEAR(_mediaControl);
+
+  RELEASE_AND_CLEAR(_inputDvPin);
+  RELEASE_AND_CLEAR(_outputDvPin);
+
+  RELEASE_AND_CLEAR(_graphBuilder);
 }
 
-VideoCaptureDS::~VideoCaptureDS()
-{
-    if (_mediaControl)
-    {
-        _mediaControl->Stop();
-    }
-    if (_graphBuilder)
-    {
-        if (_sinkFilter)
-            _graphBuilder->RemoveFilter(_sinkFilter);
-        if (_captureFilter)
-            _graphBuilder->RemoveFilter(_captureFilter);
-        if (_dvFilter)
-            _graphBuilder->RemoveFilter(_dvFilter);
-    }
-    RELEASE_AND_CLEAR(_inputSendPin);
-    RELEASE_AND_CLEAR(_outputCapturePin);
+int32_t VideoCaptureDS::Init(const char* deviceUniqueIdUTF8) {
+  const int32_t nameLength = (int32_t)strlen((char*)deviceUniqueIdUTF8);
+  if (nameLength > kVideoCaptureUniqueNameLength)
+    return -1;
 
-    RELEASE_AND_CLEAR(_captureFilter); // release the capture device
-    RELEASE_AND_CLEAR(_sinkFilter);
-    RELEASE_AND_CLEAR(_dvFilter);
+  // Store the device name
+  _deviceUniqueId = new (std::nothrow) char[nameLength + 1];
+  memcpy(_deviceUniqueId, deviceUniqueIdUTF8, nameLength + 1);
 
-    RELEASE_AND_CLEAR(_mediaControl);
+  if (_dsInfo.Init() != 0)
+    return -1;
 
-    RELEASE_AND_CLEAR(_inputDvPin);
-    RELEASE_AND_CLEAR(_outputDvPin);
+  _captureFilter = _dsInfo.GetDeviceFilter(deviceUniqueIdUTF8);
+  if (!_captureFilter) {
+    LOG(LS_INFO) << "Failed to create capture filter.";
+    return -1;
+  }
 
-    RELEASE_AND_CLEAR(_graphBuilder);
+  // Get the interface for DirectShow's GraphBuilder
+  HRESULT hr = CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
+                                IID_IGraphBuilder, (void**)&_graphBuilder);
+  if (FAILED(hr)) {
+    LOG(LS_INFO) << "Failed to create graph builder.";
+    return -1;
+  }
+
+  hr = _graphBuilder->QueryInterface(IID_IMediaControl, (void**)&_mediaControl);
+  if (FAILED(hr)) {
+    LOG(LS_INFO) << "Failed to create media control builder.";
+    return -1;
+  }
+  hr = _graphBuilder->AddFilter(_captureFilter, CAPTURE_FILTER_NAME);
+  if (FAILED(hr)) {
+    LOG(LS_INFO) << "Failed to add the capture device to the graph.";
+    return -1;
+  }
+
+  _outputCapturePin = GetOutputPin(_captureFilter, PIN_CATEGORY_CAPTURE);
+
+  // Create the sink filter used for receiving captured frames.
+  _sinkFilter = new CaptureSinkFilter(SINK_FILTER_NAME, NULL, &hr, *this);
+  if (hr != S_OK) {
+    LOG(LS_INFO) << "Failed to create send filter";
+    return -1;
+  }
+  _sinkFilter->AddRef();
+
+  hr = _graphBuilder->AddFilter(_sinkFilter, SINK_FILTER_NAME);
+  if (FAILED(hr)) {
+    LOG(LS_INFO) << "Failed to add the send filter to the graph.";
+    return -1;
+  }
+  _inputSendPin = GetInputPin(_sinkFilter);
+
+  // Temporarily connect here.
+  // This is done so that no one else can use the capture device.
+  if (SetCameraOutput(_requestedCapability) != 0) {
+    return -1;
+  }
+  hr = _mediaControl->Pause();
+  if (FAILED(hr)) {
+    LOG(LS_INFO)
+        << "Failed to Pause the Capture device. Is it already occupied? " << hr;
+    return -1;
+  }
+  LOG(LS_INFO) << "Capture device '" << deviceUniqueIdUTF8 << "' initialized.";
+  return 0;
 }
 
-int32_t VideoCaptureDS::Init(const char* deviceUniqueIdUTF8)
-{
-    const int32_t nameLength =
-        (int32_t) strlen((char*) deviceUniqueIdUTF8);
-    if (nameLength > kVideoCaptureUniqueNameLength)
-        return -1;
+int32_t VideoCaptureDS::StartCapture(const VideoCaptureCapability& capability) {
+  rtc::CritScope cs(&_apiCs);
 
-    // Store the device name
-    _deviceUniqueId = new (std::nothrow) char[nameLength + 1];
-    memcpy(_deviceUniqueId, deviceUniqueIdUTF8, nameLength + 1);
+  if (capability != _requestedCapability) {
+    DisconnectGraph();
 
-    if (_dsInfo.Init() != 0)
-        return -1;
-
-    _captureFilter = _dsInfo.GetDeviceFilter(deviceUniqueIdUTF8);
-    if (!_captureFilter)
-    {
-        LOG(LS_INFO) << "Failed to create capture filter.";
-        return -1;
+    if (SetCameraOutput(capability) != 0) {
+      return -1;
     }
-
-    // Get the interface for DirectShow's GraphBuilder
-    HRESULT hr = CoCreateInstance(CLSID_FilterGraph, NULL,
-                                  CLSCTX_INPROC_SERVER, IID_IGraphBuilder,
-                                  (void **) &_graphBuilder);
-    if (FAILED(hr))
-    {
-        LOG(LS_INFO) << "Failed to create graph builder.";
-        return -1;
-    }
-
-    hr = _graphBuilder->QueryInterface(IID_IMediaControl,
-                                       (void **) &_mediaControl);
-    if (FAILED(hr))
-    {
-        LOG(LS_INFO) << "Failed to create media control builder.";
-        return -1;
-    }
-    hr = _graphBuilder->AddFilter(_captureFilter, CAPTURE_FILTER_NAME);
-    if (FAILED(hr))
-    {
-        LOG(LS_INFO) << "Failed to add the capture device to the graph.";
-        return -1;
-    }
-
-    _outputCapturePin = GetOutputPin(_captureFilter, PIN_CATEGORY_CAPTURE);
-
-    // Create the sink filte used for receiving Captured frames.
-    _sinkFilter = new CaptureSinkFilter(SINK_FILTER_NAME, NULL, &hr,
-                                        *this);
-    if (hr != S_OK)
-    {
-        LOG(LS_INFO) << "Failed to create send filter";
-        return -1;
-    }
-    _sinkFilter->AddRef();
-
-    hr = _graphBuilder->AddFilter(_sinkFilter, SINK_FILTER_NAME);
-    if (FAILED(hr))
-    {
-        LOG(LS_INFO) << "Failed to add the send filter to the graph.";
-        return -1;
-    }
-    _inputSendPin = GetInputPin(_sinkFilter);
-
-    // Temporary connect here.
-    // This is done so that no one else can use the capture device.
-    if (SetCameraOutput(_requestedCapability) != 0)
-    {
-        return -1;
-    }
-    hr = _mediaControl->Pause();
-    if (FAILED(hr))
-    {
-        LOG(LS_INFO)
-            << "Failed to Pause the Capture device. Is it already occupied? "
-            << hr;
-        return -1;
-    }
-    LOG(LS_INFO) << "Capture device '" << deviceUniqueIdUTF8
-                 << "' initialized.";
-    return 0;
+  }
+  HRESULT hr = _mediaControl->Run();
+  if (FAILED(hr)) {
+    LOG(LS_INFO) << "Failed to start the Capture device.";
+    return -1;
+  }
+  return 0;
 }
 
-int32_t VideoCaptureDS::StartCapture(
-                                      const VideoCaptureCapability& capability)
-{
-    rtc::CritScope cs(&_apiCs);
+int32_t VideoCaptureDS::StopCapture() {
+  rtc::CritScope cs(&_apiCs);
 
-    if (capability != _requestedCapability)
-    {
-        DisconnectGraph();
-
-        if (SetCameraOutput(capability) != 0)
-        {
-            return -1;
-        }
-    }
-    HRESULT hr = _mediaControl->Run();
-    if (FAILED(hr))
-    {
-        LOG(LS_INFO) << "Failed to start the Capture device.";
-        return -1;
-    }
-    return 0;
+  HRESULT hr = _mediaControl->Pause();
+  if (FAILED(hr)) {
+    LOG(LS_INFO) << "Failed to stop the capture graph. " << hr;
+    return -1;
+  }
+  return 0;
 }
-
-int32_t VideoCaptureDS::StopCapture()
-{
-    rtc::CritScope cs(&_apiCs);
-
-    HRESULT hr = _mediaControl->Pause();
-    if (FAILED(hr))
-    {
-        LOG(LS_INFO) << "Failed to stop the capture graph. " << hr;
-        return -1;
-    }
-    return 0;
+bool VideoCaptureDS::CaptureStarted() {
+  OAFilterState state = 0;
+  HRESULT hr = _mediaControl->GetState(1000, &state);
+  if (hr != S_OK && hr != VFW_S_CANT_CUE) {
+    LOG(LS_INFO) << "Failed to get the CaptureStarted status";
+  }
+  LOG(LS_INFO) << "CaptureStarted " << state;
+  return state == State_Running;
 }
-bool VideoCaptureDS::CaptureStarted()
-{
-    OAFilterState state = 0;
-    HRESULT hr = _mediaControl->GetState(1000, &state);
-    if (hr != S_OK && hr != VFW_S_CANT_CUE)
-    {
-        LOG(LS_INFO) << "Failed to get the CaptureStarted status";
-    }
-    LOG(LS_INFO) << "CaptureStarted " << state;
-    return state == State_Running;
-
-}
-int32_t VideoCaptureDS::CaptureSettings(
-                                             VideoCaptureCapability& settings)
-{
-    settings = _requestedCapability;
-    return 0;
+int32_t VideoCaptureDS::CaptureSettings(VideoCaptureCapability& settings) {
+  settings = _requestedCapability;
+  return 0;
 }
 
 int32_t VideoCaptureDS::SetCameraOutput(
-                             const VideoCaptureCapability& requestedCapability)
-{
+    const VideoCaptureCapability& requestedCapability) {
+  // Get the best matching capability
+  VideoCaptureCapability capability;
+  int32_t capabilityIndex;
 
-    // Get the best matching capability
-    VideoCaptureCapability capability;
-    int32_t capabilityIndex;
+  // Store the new requested size
+  _requestedCapability = requestedCapability;
+  // Match the requested capability with the supported.
+  if ((capabilityIndex = _dsInfo.GetBestMatchedCapability(
+           _deviceUniqueId, _requestedCapability, capability)) < 0) {
+    return -1;
+  }
+  // Reduce the frame rate if possible.
+  if (capability.maxFPS > requestedCapability.maxFPS) {
+    capability.maxFPS = requestedCapability.maxFPS;
+  } else if (capability.maxFPS <= 0) {
+    capability.maxFPS = 30;
+  }
 
-    // Store the new requested size
-    _requestedCapability = requestedCapability;
-    // Match the requested capability with the supported.
-    if ((capabilityIndex = _dsInfo.GetBestMatchedCapability(_deviceUniqueId,
-                                                            _requestedCapability,
-                                                            capability)) < 0)
-    {
-        return -1;
-    }
-    //Reduce the frame rate if possible.
-    if (capability.maxFPS > requestedCapability.maxFPS)
-    {
-        capability.maxFPS = requestedCapability.maxFPS;
-    } else if (capability.maxFPS <= 0)
-    {
-        capability.maxFPS = 30;
+  // Convert it to the windows capability index since they are not necessarily
+  // the same.
+  VideoCaptureCapabilityWindows windowsCapability;
+  if (_dsInfo.GetWindowsCapability(capabilityIndex, windowsCapability) != 0) {
+    return -1;
+  }
+
+  IAMStreamConfig* streamConfig = NULL;
+  AM_MEDIA_TYPE* pmt = NULL;
+  VIDEO_STREAM_CONFIG_CAPS caps;
+
+  HRESULT hr = _outputCapturePin->QueryInterface(IID_IAMStreamConfig,
+                                                 (void**)&streamConfig);
+  if (hr) {
+    LOG(LS_INFO) << "Can't get the Capture format settings.";
+    return -1;
+  }
+
+  // Get the windows capability from the capture device
+  bool isDVCamera = false;
+  hr = streamConfig->GetStreamCaps(windowsCapability.directShowCapabilityIndex,
+                                   &pmt, reinterpret_cast<BYTE*>(&caps));
+  if (!FAILED(hr)) {
+    if (pmt->formattype == FORMAT_VideoInfo2) {
+      VIDEOINFOHEADER2* h = reinterpret_cast<VIDEOINFOHEADER2*>(pmt->pbFormat);
+      if (capability.maxFPS > 0 && windowsCapability.supportFrameRateControl) {
+        h->AvgTimePerFrame = REFERENCE_TIME(10000000.0 / capability.maxFPS);
+      }
+    } else {
+      VIDEOINFOHEADER* h = reinterpret_cast<VIDEOINFOHEADER*>(pmt->pbFormat);
+      if (capability.maxFPS > 0 && windowsCapability.supportFrameRateControl) {
+        h->AvgTimePerFrame = REFERENCE_TIME(10000000.0 / capability.maxFPS);
+      }
     }
 
-    // Convert it to the windows capability index since they are not nexessary
-    // the same
-    VideoCaptureCapabilityWindows windowsCapability;
-    if (_dsInfo.GetWindowsCapability(capabilityIndex, windowsCapability) != 0)
-    {
-        return -1;
-    }
+    // Set the sink filter to request this capability
+    _sinkFilter->SetMatchingMediaType(capability);
+    // Order the capture device to use this capability
+    hr += streamConfig->SetFormat(pmt);
 
-    IAMStreamConfig* streamConfig = NULL;
-    AM_MEDIA_TYPE *pmt = NULL;
-    VIDEO_STREAM_CONFIG_CAPS caps;
+    // Check if this is a DV camera and we need to add MS DV Filter
+    if (pmt->subtype == MEDIASUBTYPE_dvsl ||
+        pmt->subtype == MEDIASUBTYPE_dvsd || pmt->subtype == MEDIASUBTYPE_dvhd)
+      isDVCamera = true;  // This is a DV camera. Use MS DV filter
+  }
+  RELEASE_AND_CLEAR(streamConfig);
 
-    HRESULT hr = _outputCapturePin->QueryInterface(IID_IAMStreamConfig,
-                                                   (void**) &streamConfig);
-    if (hr)
-    {
-        LOG(LS_INFO) << "Can't get the Capture format settings.";
-        return -1;
-    }
+  if (FAILED(hr)) {
+    LOG(LS_INFO) << "Failed to set capture device output format";
+    return -1;
+  }
 
-    //Get the windows capability from the capture device
-    bool isDVCamera = false;
-    hr = streamConfig->GetStreamCaps(
-                                    windowsCapability.directShowCapabilityIndex,
-                                    &pmt, reinterpret_cast<BYTE*> (&caps));
-    if (!FAILED(hr))
-    {
-        if (pmt->formattype == FORMAT_VideoInfo2)
-        {
-            VIDEOINFOHEADER2* h =
-                reinterpret_cast<VIDEOINFOHEADER2*> (pmt->pbFormat);
-            if (capability.maxFPS > 0
-                && windowsCapability.supportFrameRateControl)
-            {
-                h->AvgTimePerFrame = REFERENCE_TIME(10000000.0
-                                                    / capability.maxFPS);
-            }
-        }
-        else
-        {
-            VIDEOINFOHEADER* h = reinterpret_cast<VIDEOINFOHEADER*>
-                                (pmt->pbFormat);
-            if (capability.maxFPS > 0
-                && windowsCapability.supportFrameRateControl)
-            {
-                h->AvgTimePerFrame = REFERENCE_TIME(10000000.0
-                                                    / capability.maxFPS);
-            }
-
-        }
-
-        // Set the sink filter to request this capability
-        _sinkFilter->SetMatchingMediaType(capability);
-        //Order the capture device to use this capability
-        hr += streamConfig->SetFormat(pmt);
-
-        //Check if this is a DV camera and we need to add MS DV Filter
-        if (pmt->subtype == MEDIASUBTYPE_dvsl
-           || pmt->subtype == MEDIASUBTYPE_dvsd
-           || pmt->subtype == MEDIASUBTYPE_dvhd)
-            isDVCamera = true; // This is a DV camera. Use MS DV filter
-    }
-    RELEASE_AND_CLEAR(streamConfig);
-
-    if (FAILED(hr))
-    {
-        LOG(LS_INFO) << "Failed to set capture device output format";
-        return -1;
-    }
-
-    if (isDVCamera)
-    {
-        hr = ConnectDVCamera();
-    }
-    else
-    {
-        hr = _graphBuilder->ConnectDirect(_outputCapturePin, _inputSendPin,
-                                          NULL);
-    }
-    if (hr != S_OK)
-    {
-        LOG(LS_INFO) << "Failed to connect the Capture graph " << hr;
-        return -1;
-    }
-    return 0;
+  if (isDVCamera) {
+    hr = ConnectDVCamera();
+  } else {
+    hr = _graphBuilder->ConnectDirect(_outputCapturePin, _inputSendPin, NULL);
+  }
+  if (hr != S_OK) {
+    LOG(LS_INFO) << "Failed to connect the Capture graph " << hr;
+    return -1;
+  }
+  return 0;
 }
 
-int32_t VideoCaptureDS::DisconnectGraph()
-{
-    HRESULT hr = _mediaControl->Stop();
-    hr += _graphBuilder->Disconnect(_outputCapturePin);
-    hr += _graphBuilder->Disconnect(_inputSendPin);
+int32_t VideoCaptureDS::DisconnectGraph() {
+  HRESULT hr = _mediaControl->Stop();
+  hr += _graphBuilder->Disconnect(_outputCapturePin);
+  hr += _graphBuilder->Disconnect(_inputSendPin);
 
-    //if the DV camera filter exist
-    if (_dvFilter)
-    {
-        _graphBuilder->Disconnect(_inputDvPin);
-        _graphBuilder->Disconnect(_outputDvPin);
-    }
-    if (hr != S_OK)
-    {
-        LOG(LS_ERROR)
-            << "Failed to Stop the Capture device for reconfiguration "
-            << hr;
-        return -1;
-    }
-    return 0;
+  // if the DV camera filter exist
+  if (_dvFilter) {
+    _graphBuilder->Disconnect(_inputDvPin);
+    _graphBuilder->Disconnect(_outputDvPin);
+  }
+  if (hr != S_OK) {
+    LOG(LS_ERROR) << "Failed to Stop the Capture device for reconfiguration "
+                  << hr;
+    return -1;
+  }
+  return 0;
 }
-HRESULT VideoCaptureDS::ConnectDVCamera()
-{
-    HRESULT hr = S_OK;
+HRESULT VideoCaptureDS::ConnectDVCamera() {
+  HRESULT hr = S_OK;
 
-    if (!_dvFilter)
-    {
-        hr = CoCreateInstance(CLSID_DVVideoCodec, NULL, CLSCTX_INPROC,
-                              IID_IBaseFilter, (void **) &_dvFilter);
-        if (hr != S_OK)
-        {
-            LOG(LS_INFO) << "Failed to create the dv decoder: " << hr;
-            return hr;
-        }
-        hr = _graphBuilder->AddFilter(_dvFilter, L"VideoDecoderDV");
-        if (hr != S_OK)
-        {
-            LOG(LS_INFO) << "Failed to add the dv decoder to the graph: " << hr;
-            return hr;
-        }
-        _inputDvPin = GetInputPin(_dvFilter);
-        if (_inputDvPin == NULL)
-        {
-            LOG(LS_INFO) << "Failed to get input pin from DV decoder";
-            return -1;
-        }
-        _outputDvPin = GetOutputPin(_dvFilter, GUID_NULL);
-        if (_outputDvPin == NULL)
-        {
-            LOG(LS_INFO) << "Failed to get output pin from DV decoder";
-            return -1;
-        }
+  if (!_dvFilter) {
+    hr = CoCreateInstance(CLSID_DVVideoCodec, NULL, CLSCTX_INPROC,
+                          IID_IBaseFilter, (void**)&_dvFilter);
+    if (hr != S_OK) {
+      LOG(LS_INFO) << "Failed to create the dv decoder: " << hr;
+      return hr;
     }
-    hr = _graphBuilder->ConnectDirect(_outputCapturePin, _inputDvPin, NULL);
-    if (hr != S_OK)
-    {
-        LOG(LS_INFO) << "Failed to connect capture device to the dv devoder: "
-                     << hr;
-        return hr;
+    hr = _graphBuilder->AddFilter(_dvFilter, L"VideoDecoderDV");
+    if (hr != S_OK) {
+      LOG(LS_INFO) << "Failed to add the dv decoder to the graph: " << hr;
+      return hr;
     }
+    _inputDvPin = GetInputPin(_dvFilter);
+    if (_inputDvPin == NULL) {
+      LOG(LS_INFO) << "Failed to get input pin from DV decoder";
+      return -1;
+    }
+    _outputDvPin = GetOutputPin(_dvFilter, GUID_NULL);
+    if (_outputDvPin == NULL) {
+      LOG(LS_INFO) << "Failed to get output pin from DV decoder";
+      return -1;
+    }
+  }
+  hr = _graphBuilder->ConnectDirect(_outputCapturePin, _inputDvPin, NULL);
+  if (hr != S_OK) {
+    LOG(LS_INFO) << "Failed to connect capture device to the dv devoder: "
+                 << hr;
+    return hr;
+  }
 
-    hr = _graphBuilder->ConnectDirect(_outputDvPin, _inputSendPin, NULL);
-    if (hr != S_OK)
-    {
-        if (hr == HRESULT_FROM_WIN32(ERROR_TOO_MANY_OPEN_FILES))
-        {
-            LOG(LS_INFO) << "Failed to connect the capture device, busy";
-        }
-        else
-        {
-            LOG(LS_INFO)
-                << "Failed to connect capture device to the send graph: "
-                << hr;
-        }
-        return hr;
+  hr = _graphBuilder->ConnectDirect(_outputDvPin, _inputSendPin, NULL);
+  if (hr != S_OK) {
+    if (hr == HRESULT_FROM_WIN32(ERROR_TOO_MANY_OPEN_FILES)) {
+      LOG(LS_INFO) << "Failed to connect the capture device, busy";
+    } else {
+      LOG(LS_INFO) << "Failed to connect capture device to the send graph: "
+                   << hr;
     }
     return hr;
+  }
+  return hr;
 }
 }  // namespace videocapturemodule
 }  // namespace webrtc